root / lib / cli.py @ 587832ed
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
import shlex
33
from cStringIO import StringIO
34

    
35
from ganeti import utils
36
from ganeti import errors
37
from ganeti import constants
38
from ganeti import opcodes
39
from ganeti import luxi
40
from ganeti import ssconf
41
from ganeti import rpc
42
from ganeti import ssh
43
from ganeti import compat
44
from ganeti import netutils
45
from ganeti import qlang
46
from ganeti import objects
47
from ganeti import pathutils
48

    
49
from optparse import (OptionParser, TitledHelpFormatter,
50
                      Option, OptionValueError)
51

    
52

    
53
__all__ = [
54
  # Command line options
55
  "ABSOLUTE_OPT",
56
  "ADD_UIDS_OPT",
57
  "ADD_RESERVED_IPS_OPT",
58
  "ALLOCATABLE_OPT",
59
  "ALLOC_POLICY_OPT",
60
  "ALL_OPT",
61
  "ALLOW_FAILOVER_OPT",
62
  "AUTO_PROMOTE_OPT",
63
  "AUTO_REPLACE_OPT",
64
  "BACKEND_OPT",
65
  "BLK_OS_OPT",
66
  "CAPAB_MASTER_OPT",
67
  "CAPAB_VM_OPT",
68
  "CLEANUP_OPT",
69
  "CLUSTER_DOMAIN_SECRET_OPT",
70
  "CONFIRM_OPT",
71
  "CP_SIZE_OPT",
72
  "DEBUG_OPT",
73
  "DEBUG_SIMERR_OPT",
74
  "DISKIDX_OPT",
75
  "DISK_OPT",
76
  "DISK_PARAMS_OPT",
77
  "DISK_TEMPLATE_OPT",
78
  "DRAINED_OPT",
79
  "DRY_RUN_OPT",
80
  "DRBD_HELPER_OPT",
81
  "DST_NODE_OPT",
82
  "EARLY_RELEASE_OPT",
83
  "ENABLED_HV_OPT",
84
  "ENABLED_DISK_TEMPLATES_OPT",
85
  "ERROR_CODES_OPT",
86
  "FAILURE_ONLY_OPT",
87
  "FIELDS_OPT",
88
  "FILESTORE_DIR_OPT",
89
  "FILESTORE_DRIVER_OPT",
90
  "FORCE_FAILOVER_OPT",
91
  "FORCE_FILTER_OPT",
92
  "FORCE_OPT",
93
  "FORCE_VARIANT_OPT",
94
  "GATEWAY_OPT",
95
  "GATEWAY6_OPT",
96
  "GLOBAL_FILEDIR_OPT",
97
  "HID_OS_OPT",
98
  "GLOBAL_SHARED_FILEDIR_OPT",
99
  "HOTPLUG_OPT",
100
  "HOTPLUG_IF_POSSIBLE_OPT",
101
  "HVLIST_OPT",
102
  "HVOPTS_OPT",
103
  "HYPERVISOR_OPT",
104
  "IALLOCATOR_OPT",
105
  "DEFAULT_IALLOCATOR_OPT",
106
  "IDENTIFY_DEFAULTS_OPT",
107
  "IGNORE_CONSIST_OPT",
108
  "IGNORE_ERRORS_OPT",
109
  "IGNORE_FAILURES_OPT",
110
  "IGNORE_OFFLINE_OPT",
111
  "IGNORE_REMOVE_FAILURES_OPT",
112
  "IGNORE_SECONDARIES_OPT",
113
  "IGNORE_SIZE_OPT",
114
  "INCLUDEDEFAULTS_OPT",
115
  "INTERVAL_OPT",
116
  "MAC_PREFIX_OPT",
117
  "MAINTAIN_NODE_HEALTH_OPT",
118
  "MASTER_NETDEV_OPT",
119
  "MASTER_NETMASK_OPT",
120
  "MC_OPT",
121
  "MIGRATION_MODE_OPT",
122
  "MODIFY_ETCHOSTS_OPT",
123
  "NET_OPT",
124
  "NETWORK_OPT",
125
  "NETWORK6_OPT",
126
  "NEW_CLUSTER_CERT_OPT",
127
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
128
  "NEW_CONFD_HMAC_KEY_OPT",
129
  "NEW_RAPI_CERT_OPT",
130
  "NEW_PRIMARY_OPT",
131
  "NEW_SECONDARY_OPT",
132
  "NEW_SPICE_CERT_OPT",
133
  "NIC_PARAMS_OPT",
134
  "NOCONFLICTSCHECK_OPT",
135
  "NODE_FORCE_JOIN_OPT",
136
  "NODE_LIST_OPT",
137
  "NODE_PLACEMENT_OPT",
138
  "NODEGROUP_OPT",
139
  "NODE_PARAMS_OPT",
140
  "NODE_POWERED_OPT",
141
  "NOHDR_OPT",
142
  "NOIPCHECK_OPT",
143
  "NO_INSTALL_OPT",
144
  "NONAMECHECK_OPT",
145
  "NOLVM_STORAGE_OPT",
146
  "NOMODIFY_ETCHOSTS_OPT",
147
  "NOMODIFY_SSH_SETUP_OPT",
148
  "NONICS_OPT",
149
  "NONLIVE_OPT",
150
  "NONPLUS1_OPT",
151
  "NORUNTIME_CHGS_OPT",
152
  "NOSHUTDOWN_OPT",
153
  "NOSTART_OPT",
154
  "NOSSH_KEYCHECK_OPT",
155
  "NOVOTING_OPT",
156
  "NO_REMEMBER_OPT",
157
  "NWSYNC_OPT",
158
  "OFFLINE_INST_OPT",
159
  "ONLINE_INST_OPT",
160
  "ON_PRIMARY_OPT",
161
  "ON_SECONDARY_OPT",
162
  "OFFLINE_OPT",
163
  "OSPARAMS_OPT",
164
  "OS_OPT",
165
  "OS_SIZE_OPT",
166
  "OOB_TIMEOUT_OPT",
167
  "POWER_DELAY_OPT",
168
  "PREALLOC_WIPE_DISKS_OPT",
169
  "PRIMARY_IP_VERSION_OPT",
170
  "PRIMARY_ONLY_OPT",
171
  "PRINT_JOBID_OPT",
172
  "PRIORITY_OPT",
173
  "RAPI_CERT_OPT",
174
  "READD_OPT",
175
  "REASON_OPT",
176
  "REBOOT_TYPE_OPT",
177
  "REMOVE_INSTANCE_OPT",
178
  "REMOVE_RESERVED_IPS_OPT",
179
  "REMOVE_UIDS_OPT",
180
  "RESERVED_LVS_OPT",
181
  "RUNTIME_MEM_OPT",
182
  "ROMAN_OPT",
183
  "SECONDARY_IP_OPT",
184
  "SECONDARY_ONLY_OPT",
185
  "SELECT_OS_OPT",
186
  "SEP_OPT",
187
  "SHOWCMD_OPT",
188
  "SHOW_MACHINE_OPT",
189
  "SHUTDOWN_TIMEOUT_OPT",
190
  "SINGLE_NODE_OPT",
191
  "SPECS_CPU_COUNT_OPT",
192
  "SPECS_DISK_COUNT_OPT",
193
  "SPECS_DISK_SIZE_OPT",
194
  "SPECS_MEM_SIZE_OPT",
195
  "SPECS_NIC_COUNT_OPT",
196
  "SPLIT_ISPECS_OPTS",
197
  "IPOLICY_STD_SPECS_OPT",
198
  "IPOLICY_DISK_TEMPLATES",
199
  "IPOLICY_VCPU_RATIO",
200
  "SEQUENTIAL_OPT",
201
  "SPICE_CACERT_OPT",
202
  "SPICE_CERT_OPT",
203
  "SRC_DIR_OPT",
204
  "SRC_NODE_OPT",
205
  "SUBMIT_OPT",
206
  "SUBMIT_OPTS",
207
  "STARTUP_PAUSED_OPT",
208
  "STATIC_OPT",
209
  "SYNC_OPT",
210
  "TAG_ADD_OPT",
211
  "TAG_SRC_OPT",
212
  "TIMEOUT_OPT",
213
  "TO_GROUP_OPT",
214
  "UIDPOOL_OPT",
215
  "USEUNITS_OPT",
216
  "USE_EXTERNAL_MIP_SCRIPT",
217
  "USE_REPL_NET_OPT",
218
  "VERBOSE_OPT",
219
  "VG_NAME_OPT",
220
  "WFSYNC_OPT",
221
  "YES_DOIT_OPT",
222
  "DISK_STATE_OPT",
223
  "HV_STATE_OPT",
224
  "IGNORE_IPOLICY_OPT",
225
  "INSTANCE_POLICY_OPTS",
226
  # Generic functions for CLI programs
227
  "ConfirmOperation",
228
  "CreateIPolicyFromOpts",
229
  "GenericMain",
230
  "GenericInstanceCreate",
231
  "GenericList",
232
  "GenericListFields",
233
  "GetClient",
234
  "GetOnlineNodes",
235
  "JobExecutor",
236
  "JobSubmittedException",
237
  "ParseTimespec",
238
  "RunWhileClusterStopped",
239
  "SubmitOpCode",
240
  "SubmitOpCodeToDrainedQueue",
241
  "SubmitOrSend",
242
  "UsesRPC",
243
  # Formatting functions
244
  "ToStderr", "ToStdout",
245
  "FormatError",
246
  "FormatQueryResult",
247
  "FormatParamsDictInfo",
248
  "FormatPolicyInfo",
249
  "PrintIPolicyCommand",
250
  "PrintGenericInfo",
251
  "GenerateTable",
252
  "AskUser",
253
  "FormatTimestamp",
254
  "FormatLogMessage",
255
  # Tags functions
256
  "ListTags",
257
  "AddTags",
258
  "RemoveTags",
259
  # command line options support infrastructure
260
  "ARGS_MANY_INSTANCES",
261
  "ARGS_MANY_NODES",
262
  "ARGS_MANY_GROUPS",
263
  "ARGS_MANY_NETWORKS",
264
  "ARGS_NONE",
265
  "ARGS_ONE_INSTANCE",
266
  "ARGS_ONE_NODE",
267
  "ARGS_ONE_GROUP",
268
  "ARGS_ONE_OS",
269
  "ARGS_ONE_NETWORK",
270
  "ArgChoice",
271
  "ArgCommand",
272
  "ArgFile",
273
  "ArgGroup",
274
  "ArgHost",
275
  "ArgInstance",
276
  "ArgJobId",
277
  "ArgNetwork",
278
  "ArgNode",
279
  "ArgOs",
280
  "ArgExtStorage",
281
  "ArgSuggest",
282
  "ArgUnknown",
283
  "OPT_COMPL_INST_ADD_NODES",
284
  "OPT_COMPL_MANY_NODES",
285
  "OPT_COMPL_ONE_IALLOCATOR",
286
  "OPT_COMPL_ONE_INSTANCE",
287
  "OPT_COMPL_ONE_NODE",
288
  "OPT_COMPL_ONE_NODEGROUP",
289
  "OPT_COMPL_ONE_NETWORK",
290
  "OPT_COMPL_ONE_OS",
291
  "OPT_COMPL_ONE_EXTSTORAGE",
292
  "cli_option",
293
  "FixHvParams",
294
  "SplitNodeOption",
295
  "CalculateOSNames",
296
  "ParseFields",
297
  "COMMON_CREATE_OPTS",
298
  ]
299

    
300
NO_PREFIX = "no_"
301
UN_PREFIX = "-"
302

    
303
#: Priorities (sorted)
304
_PRIORITY_NAMES = [
305
  ("low", constants.OP_PRIO_LOW),
306
  ("normal", constants.OP_PRIO_NORMAL),
307
  ("high", constants.OP_PRIO_HIGH),
308
  ]
309

    
310
#: Priority dictionary for easier lookup
311
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
312
# we migrate to Python 2.6
313
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
314

    
315
# Query result status for clients
316
(QR_NORMAL,
317
 QR_UNKNOWN,
318
 QR_INCOMPLETE) = range(3)
319

    
320
#: Maximum batch size for ChooseJob
321
_CHOOSE_BATCH = 25
322

    
323

    
324
# constants used to create InstancePolicy dictionary
325
TISPECS_GROUP_TYPES = {
326
  constants.ISPECS_MIN: constants.VTYPE_INT,
327
  constants.ISPECS_MAX: constants.VTYPE_INT,
328
  }
329

    
330
TISPECS_CLUSTER_TYPES = {
331
  constants.ISPECS_MIN: constants.VTYPE_INT,
332
  constants.ISPECS_MAX: constants.VTYPE_INT,
333
  constants.ISPECS_STD: constants.VTYPE_INT,
334
  }
335

    
336
#: User-friendly names for query2 field types
337
_QFT_NAMES = {
338
  constants.QFT_UNKNOWN: "Unknown",
339
  constants.QFT_TEXT: "Text",
340
  constants.QFT_BOOL: "Boolean",
341
  constants.QFT_NUMBER: "Number",
342
  constants.QFT_UNIT: "Storage size",
343
  constants.QFT_TIMESTAMP: "Timestamp",
344
  constants.QFT_OTHER: "Custom",
345
  }
346

    
347

    
348
class _Argument(object):
349
  def __init__(self, min=0, max=None): # pylint: disable=W0622
350
    self.min = min
351
    self.max = max
352

    
353
  def __repr__(self):
354
    return ("<%s min=%s max=%s>" %
355
            (self.__class__.__name__, self.min, self.max))
356

    
357

    
358
class ArgSuggest(_Argument):
359
  """Suggesting argument.
360

361
  Value can be any of the ones passed to the constructor.
362

363
  """
364
  # pylint: disable=W0622
365
  def __init__(self, min=0, max=None, choices=None):
366
    _Argument.__init__(self, min=min, max=max)
367
    self.choices = choices
368

    
369
  def __repr__(self):
370
    return ("<%s min=%s max=%s choices=%r>" %
371
            (self.__class__.__name__, self.min, self.max, self.choices))
372

    
373

    
374
class ArgChoice(ArgSuggest):
375
  """Choice argument.
376

377
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
378
  but value must be one of the choices.
379

380
  """
381

    
382

    
383
class ArgUnknown(_Argument):
384
  """Unknown argument to program (e.g. determined at runtime).
385

386
  """
387

    
388

    
389
class ArgInstance(_Argument):
390
  """Instances argument.
391

392
  """
393

    
394

    
395
class ArgNode(_Argument):
396
  """Node argument.
397

398
  """
399

    
400

    
401
class ArgNetwork(_Argument):
402
  """Network argument.
403

404
  """
405

    
406

    
407
class ArgGroup(_Argument):
408
  """Node group argument.
409

410
  """
411

    
412

    
413
class ArgJobId(_Argument):
414
  """Job ID argument.
415

416
  """
417

    
418

    
419
class ArgFile(_Argument):
420
  """File path argument.
421

422
  """
423

    
424

    
425
class ArgCommand(_Argument):
426
  """Command argument.
427

428
  """
429

    
430

    
431
class ArgHost(_Argument):
432
  """Host argument.
433

434
  """
435

    
436

    
437
class ArgOs(_Argument):
438
  """OS argument.
439

440
  """
441

    
442

    
443
class ArgExtStorage(_Argument):
444
  """ExtStorage argument.
445

446
  """
447

    
448

    
449
ARGS_NONE = []
450
ARGS_MANY_INSTANCES = [ArgInstance()]
451
ARGS_MANY_NETWORKS = [ArgNetwork()]
452
ARGS_MANY_NODES = [ArgNode()]
453
ARGS_MANY_GROUPS = [ArgGroup()]
454
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
455
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
456
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
457
# TODO
458
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
459
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
460

    
461

    
462
def _ExtractTagsObject(opts, args):
463
  """Extract the tag type object.
464

465
  Note that this function will modify its args parameter.
466

467
  """
468
  if not hasattr(opts, "tag_type"):
469
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
470
  kind = opts.tag_type
471
  if kind == constants.TAG_CLUSTER:
472
    retval = kind, ""
473
  elif kind in (constants.TAG_NODEGROUP,
474
                constants.TAG_NODE,
475
                constants.TAG_NETWORK,
476
                constants.TAG_INSTANCE):
477
    if not args:
478
      raise errors.OpPrereqError("no arguments passed to the command",
479
                                 errors.ECODE_INVAL)
480
    name = args.pop(0)
481
    retval = kind, name
482
  else:
483
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
484
  return retval
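
# Example (hypothetical values):
#   opts.tag_type = constants.TAG_NODE; args = ["node1.example.com", "web"]
#   _ExtractTagsObject(opts, args) -> (constants.TAG_NODE, "node1.example.com")
#   # args is modified in place and is left as ["web"]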
485

    
486

    
487
def _ExtendTags(opts, args):
488
  """Extend the args if a source file has been given.
489

490
  This function will extend the tags with the contents of the file
491
  passed in the 'tags_source' attribute of the opts parameter. A file
492
  named '-' will be replaced by stdin.
493

494
  """
495
  fname = opts.tags_source
496
  if fname is None:
497
    return
498
  if fname == "-":
499
    new_fh = sys.stdin
500
  else:
501
    new_fh = open(fname, "r")
502
  new_data = []
503
  try:
504
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
505
    # because of python bug 1633941
506
    while True:
507
      line = new_fh.readline()
508
      if not line:
509
        break
510
      new_data.append(line.strip())
511
  finally:
512
    new_fh.close()
513
  args.extend(new_data)
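
# Example (hypothetical file contents): if opts.tags_source names a file whose
# lines are "tag1" and "  tag2  ", args is extended with ["tag1", "tag2"]; a
# tags_source of "-" makes the tags be read from stdin instead.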
514

    
515

    
516
def ListTags(opts, args):
517
  """List the tags on a given object.
518

519
  This is a generic implementation that knows how to deal with all
520
  tag object kinds (cluster, node group, node, network, instance). The opts
521
  argument is expected to contain a tag_type field denoting what
522
  object type we work on.
523

524
  """
525
  kind, name = _ExtractTagsObject(opts, args)
526
  cl = GetClient(query=True)
527
  result = cl.QueryTags(kind, name)
528
  result = list(result)
529
  result.sort()
530
  for tag in result:
531
    ToStdout(tag)
532

    
533

    
534
def AddTags(opts, args):
535
  """Add tags on a given object.
536

537
  This is a generic implementation that knows how to deal with all
538
  tag object kinds (cluster, node group, node, network, instance). The opts
539
  argument is expected to contain a tag_type field denoting what
540
  object type we work on.
541

542
  """
543
  kind, name = _ExtractTagsObject(opts, args)
544
  _ExtendTags(opts, args)
545
  if not args:
546
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
547
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
548
  SubmitOrSend(op, opts)
549

    
550

    
551
def RemoveTags(opts, args):
552
  """Remove tags from a given object.
553

554
  This is a generic implementation that knows how to deal with all
555
  tag object kinds (cluster, node group, node, network, instance). The opts
556
  argument is expected to contain a tag_type field denoting what
557
  object type we work on.
558

559
  """
560
  kind, name = _ExtractTagsObject(opts, args)
561
  _ExtendTags(opts, args)
562
  if not args:
563
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
564
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
565
  SubmitOrSend(op, opts)
566

    
567

    
568
def check_unit(option, opt, value): # pylint: disable=W0613
569
  """OptParsers custom converter for units.
570

571
  """
572
  try:
573
    return utils.ParseUnit(value)
574
  except errors.UnitParseError, err:
575
    raise OptionValueError("option %s: %s" % (opt, err))
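
# Example (assuming the MiB-based semantics of utils.ParseUnit): a plain "512"
# parses to 512 (MiB), while a value such as "1g" parses to 1024.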
576

    
577

    
578
def _SplitKeyVal(opt, data, parse_prefixes):
579
  """Convert a KeyVal string into a dict.
580

581
  This function will convert a key=val[,...] string into a dict. Empty
582
  values will be converted specially: keys which have the prefix 'no_'
583
  will have the value=False and the prefix stripped, keys with the prefix
584
  "-" will have value=None and the prefix stripped, and the others will
585
  have value=True.
586

587
  @type opt: string
588
  @param opt: a string holding the option name for which we process the
589
      data, used in building error messages
590
  @type data: string
591
  @param data: a string of the format key=val,key=val,...
592
  @type parse_prefixes: bool
593
  @param parse_prefixes: whether to handle prefixes specially
594
  @rtype: dict
595
  @return: {key=val, key=val}
596
  @raises errors.ParameterError: if there are duplicate keys
597

598
  """
599
  kv_dict = {}
600
  if data:
601
    for elem in utils.UnescapeAndSplit(data, sep=","):
602
      if "=" in elem:
603
        key, val = elem.split("=", 1)
604
      elif parse_prefixes:
605
        if elem.startswith(NO_PREFIX):
606
          key, val = elem[len(NO_PREFIX):], False
607
        elif elem.startswith(UN_PREFIX):
608
          key, val = elem[len(UN_PREFIX):], None
609
        else:
610
          key, val = elem, True
611
      else:
612
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
613
                                    (elem, opt))
614
      if key in kv_dict:
615
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
616
                                    (key, opt))
617
      kv_dict[key] = val
618
  return kv_dict
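
# Example (hypothetical data): with parse_prefixes=True, the string
# "mem=512,no_ip_check,-oob" parses to
# {"mem": "512", "ip_check": False, "oob": None}.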
619

    
620

    
621
def _SplitIdentKeyVal(opt, value, parse_prefixes):
622
  """Helper function to parse "ident:key=val,key=val" options.
623

624
  @type opt: string
625
  @param opt: option name, used in error messages
626
  @type value: string
627
  @param value: expected to be in the format "ident:key=val,key=val,..."
628
  @type parse_prefixes: bool
629
  @param parse_prefixes: whether to handle prefixes specially (see
630
      L{_SplitKeyVal})
631
  @rtype: tuple
632
  @return: (ident, {key=val, key=val})
633
  @raises errors.ParameterError: in case of duplicates or other parsing errors
634

635
  """
636
  if ":" not in value:
637
    ident, rest = value, ""
638
  else:
639
    ident, rest = value.split(":", 1)
640

    
641
  if parse_prefixes and ident.startswith(NO_PREFIX):
642
    if rest:
643
      msg = "Cannot pass options when removing parameter groups: %s" % value
644
      raise errors.ParameterError(msg)
645
    retval = (ident[len(NO_PREFIX):], False)
646
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
647
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
648
    if rest:
649
      msg = "Cannot pass options when removing parameter groups: %s" % value
650
      raise errors.ParameterError(msg)
651
    retval = (ident[len(UN_PREFIX):], None)
652
  else:
653
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
654
    retval = (ident, kv_dict)
655
  return retval
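
# Example (hypothetical data): "xen-pvm:no_acpi,kernel_path=/vmlinuz" yields
# ("xen-pvm", {"acpi": False, "kernel_path": "/vmlinuz"}), while a bare
# "no_xen-pvm" (with parse_prefixes=True) yields ("xen-pvm", False).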
656

    
657

    
658
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
659
  """Custom parser for ident:key=val,key=val options.
660

661
  This will store the parsed values as a tuple (ident, {key: val}). As such,
662
  multiple uses of this option via action=append is possible.
663

664
  """
665
  return _SplitIdentKeyVal(opt, value, True)
666

    
667

    
668
def check_key_val(option, opt, value):  # pylint: disable=W0613
669
  """Custom parser class for key=val,key=val options.
670

671
  This will store the parsed values as a dict {key: val}.
672

673
  """
674
  return _SplitKeyVal(opt, value, True)
675

    
676

    
677
def _SplitListKeyVal(opt, value):
678
  retval = {}
679
  for elem in value.split("/"):
680
    if not elem:
681
      raise errors.ParameterError("Empty section in option '%s'" % opt)
682
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
683
    if ident in retval:
684
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
685
             (ident, opt, elem))
686
      raise errors.ParameterError(msg)
687
    retval[ident] = valdict
688
  return retval
689

    
690

    
691
def check_multilist_ident_key_val(_, opt, value):
692
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
693

694
  @rtype: list of dictionary
695
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
696

697
  """
698
  retval = []
699
  for line in value.split("//"):
700
    retval.append(_SplitListKeyVal(opt, line))
701
  return retval
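
# Example (hypothetical data): "a:x=1,y=2/b:z=3//c:w=4" parses to
# [{"a": {"x": "1", "y": "2"}, "b": {"z": "3"}}, {"c": {"w": "4"}}].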
702

    
703

    
704
def check_bool(option, opt, value): # pylint: disable=W0613
705
  """Custom parser for yes/no options.
706

707
  This will store the parsed value as either True or False.
708

709
  """
710
  value = value.lower()
711
  if value == constants.VALUE_FALSE or value == "no":
712
    return False
713
  elif value == constants.VALUE_TRUE or value == "yes":
714
    return True
715
  else:
716
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
717

    
718

    
719
def check_list(option, opt, value): # pylint: disable=W0613
720
  """Custom parser for comma-separated lists.
721

722
  """
723
  # we have to make this explicit check since "".split(",") is [""],
724
  # not an empty list :(
725
  if not value:
726
    return []
727
  else:
728
    return utils.UnescapeAndSplit(value)
729

    
730

    
731
def check_maybefloat(option, opt, value): # pylint: disable=W0613
732
  """Custom parser for float numbers which might be also defaults.
733

734
  """
735
  value = value.lower()
736

    
737
  if value == constants.VALUE_DEFAULT:
738
    return value
739
  else:
740
    return float(value)
741

    
742

    
743
# completion_suggest is normally a list. Numeric values that do not evaluate
744
# to False are used to request dynamic completion.
745
(OPT_COMPL_MANY_NODES,
746
 OPT_COMPL_ONE_NODE,
747
 OPT_COMPL_ONE_INSTANCE,
748
 OPT_COMPL_ONE_OS,
749
 OPT_COMPL_ONE_EXTSTORAGE,
750
 OPT_COMPL_ONE_IALLOCATOR,
751
 OPT_COMPL_ONE_NETWORK,
752
 OPT_COMPL_INST_ADD_NODES,
753
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
754

    
755
OPT_COMPL_ALL = compat.UniqueFrozenset([
756
  OPT_COMPL_MANY_NODES,
757
  OPT_COMPL_ONE_NODE,
758
  OPT_COMPL_ONE_INSTANCE,
759
  OPT_COMPL_ONE_OS,
760
  OPT_COMPL_ONE_EXTSTORAGE,
761
  OPT_COMPL_ONE_IALLOCATOR,
762
  OPT_COMPL_ONE_NETWORK,
763
  OPT_COMPL_INST_ADD_NODES,
764
  OPT_COMPL_ONE_NODEGROUP,
765
  ])
766

    
767

    
768
class CliOption(Option):
769
  """Custom option class for optparse.
770

771
  """
772
  ATTRS = Option.ATTRS + [
773
    "completion_suggest",
774
    ]
775
  TYPES = Option.TYPES + (
776
    "multilistidentkeyval",
777
    "identkeyval",
778
    "keyval",
779
    "unit",
780
    "bool",
781
    "list",
782
    "maybefloat",
783
    )
784
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
785
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
786
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
787
  TYPE_CHECKER["keyval"] = check_key_val
788
  TYPE_CHECKER["unit"] = check_unit
789
  TYPE_CHECKER["bool"] = check_bool
790
  TYPE_CHECKER["list"] = check_list
791
  TYPE_CHECKER["maybefloat"] = check_maybefloat
792

    
793

    
794
# optparse.py sets make_option, so we do it for our own option class, too
795
cli_option = CliOption
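
# Example (illustrative use of the custom option types, with hypothetical
# parameter names):
#   parser = OptionParser(option_class=CliOption)
#   parser.add_option("-B", dest="beparams", type="keyval", default={})
#   (opts, _) = parser.parse_args(["-B", "memory=4096,auto_balance"])
#   # opts.beparams == {"memory": "4096", "auto_balance": True}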
796

    
797

    
798
_YORNO = "yes|no"
799

    
800
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
801
                       help="Increase debugging level")
802

    
803
NOHDR_OPT = cli_option("--no-headers", default=False,
804
                       action="store_true", dest="no_headers",
805
                       help="Don't display column headers")
806

    
807
SEP_OPT = cli_option("--separator", default=None,
808
                     action="store", dest="separator",
809
                     help=("Separator between output fields"
810
                           " (defaults to one space)"))
811

    
812
USEUNITS_OPT = cli_option("--units", default=None,
813
                          dest="units", choices=("h", "m", "g", "t"),
814
                          help="Specify units for output (one of h/m/g/t)")
815

    
816
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
817
                        type="string", metavar="FIELDS",
818
                        help="Comma separated list of output fields")
819

    
820
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
821
                       default=False, help="Force the operation")
822

    
823
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
824
                         default=False, help="Do not require confirmation")
825

    
826
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
827
                                  action="store_true", default=False,
828
                                  help=("Ignore offline nodes and do as much"
829
                                        " as possible"))
830

    
831
TAG_ADD_OPT = cli_option("--tags", dest="tags",
832
                         default=None, help="Comma-separated list of instance"
833
                                            " tags")
834

    
835
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
836
                         default=None, help="File with tag names")
837

    
838
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
839
                        default=False, action="store_true",
840
                        help=("Submit the job and return the job ID, but"
841
                              " don't wait for the job to finish"))
842

    
843
PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
844
                             default=False, action="store_true",
845
                             help=("Additionally print the job as first line"
846
                                   " on stdout (for scripting)."))
847

    
848
SEQUENTIAL_OPT = cli_option("--sequential", dest="sequential",
849
                            default=False, action="store_true",
850
                            help=("Execute all resulting jobs sequentially"))
851

    
852
SYNC_OPT = cli_option("--sync", dest="do_locking",
853
                      default=False, action="store_true",
854
                      help=("Grab locks while doing the queries"
855
                            " in order to ensure more consistent results"))
856

    
857
DRY_RUN_OPT = cli_option("--dry-run", default=False,
858
                         action="store_true",
859
                         help=("Do not execute the operation, just run the"
860
                               " check steps and verify if it could be"
861
                               " executed"))
862

    
863
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
864
                         action="store_true",
865
                         help="Increase the verbosity of the operation")
866

    
867
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
868
                              action="store_true", dest="simulate_errors",
869
                              help="Debugging option that makes the operation"
870
                              " treat most runtime checks as failed")
871

    
872
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
873
                        default=True, action="store_false",
874
                        help="Don't wait for sync (DANGEROUS!)")
875

    
876
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
877
                        default=False, action="store_true",
878
                        help="Wait for disks to sync")
879

    
880
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
881
                             action="store_true", default=False,
882
                             help="Enable offline instance")
883

    
884
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
885
                              action="store_true", default=False,
886
                              help="Disable down instance")
887

    
888
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
889
                               help=("Custom disk setup (%s)" %
890
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
891
                               default=None, metavar="TEMPL",
892
                               choices=list(constants.DISK_TEMPLATES))
893

    
894
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
895
                        help="Do not create any network cards for"
896
                        " the instance")
897

    
898
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
899
                               help="Relative path under default cluster-wide"
900
                               " file storage dir to store file-based disks",
901
                               default=None, metavar="<DIR>")
902

    
903
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
904
                                  help="Driver to use for image files",
905
                                  default=None, metavar="<DRIVER>",
906
                                  choices=list(constants.FILE_DRIVER))
907

    
908
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
909
                            help="Select nodes for the instance automatically"
910
                            " using the <NAME> iallocator plugin",
911
                            default=None, type="string",
912
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
913

    
914
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
915
                                    metavar="<NAME>",
916
                                    help="Set the default instance"
917
                                    " allocator plugin",
918
                                    default=None, type="string",
919
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
920

    
921
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
922
                    metavar="<os>",
923
                    completion_suggest=OPT_COMPL_ONE_OS)
924

    
925
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
926
                          type="keyval", default={},
927
                          help="OS parameters")
928

    
929
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
930
                               action="store_true", default=False,
931
                               help="Force an unknown variant")
932

    
933
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
934
                            action="store_true", default=False,
935
                            help="Do not install the OS (will"
936
                            " enable no-start)")
937

    
938
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
939
                                dest="allow_runtime_chgs",
940
                                default=True, action="store_false",
941
                                help="Don't allow runtime changes")
942

    
943
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
944
                         type="keyval", default={},
945
                         help="Backend parameters")
946

    
947
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
948
                        default={}, dest="hvparams",
949
                        help="Hypervisor parameters")
950

    
951
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
952
                             help="Disk template parameters, in the format"
953
                             " template:option=value,option=value,...",
954
                             type="identkeyval", action="append", default=[])
955

    
956
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
957
                                 type="keyval", default={},
958
                                 help="Memory size specs: list of key=value,"
959
                                " where key is one of min, max, std"
960
                                 " (in MB or using a unit)")
961

    
962
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
963
                                 type="keyval", default={},
964
                                 help="CPU count specs: list of key=value,"
965
                                 " where key is one of min, max, std")
966

    
967
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
968
                                  dest="ispecs_disk_count",
969
                                  type="keyval", default={},
970
                                  help="Disk count specs: list of key=value,"
971
                                  " where key is one of min, max, std")
972

    
973
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
974
                                 type="keyval", default={},
975
                                 help="Disk size specs: list of key=value,"
976
                                 " where key is one of min, max, std"
977
                                 " (in MB or using a unit)")
978

    
979
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
980
                                 type="keyval", default={},
981
                                 help="NIC count specs: list of key=value,"
982
                                 " where key is one of min, max, std")
983

    
984
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
985
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
986
                                      dest="ipolicy_bounds_specs",
987
                                      type="multilistidentkeyval", default=None,
988
                                      help="Complete instance specs limits")
989

    
990
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
991
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
992
                                   dest="ipolicy_std_specs",
993
                                   type="keyval", default=None,
994
                                   help="Complte standard instance specs")
995

    
996
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
997
                                    dest="ipolicy_disk_templates",
998
                                    type="list", default=None,
999
                                    help="Comma-separated list of"
1000
                                    " enabled disk templates")
1001

    
1002
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
1003
                                 dest="ipolicy_vcpu_ratio",
1004
                                 type="maybefloat", default=None,
1005
                                 help="The maximum allowed vcpu-to-cpu ratio")
1006

    
1007
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
1008
                                   dest="ipolicy_spindle_ratio",
1009
                                   type="maybefloat", default=None,
1010
                                   help=("The maximum allowed instances to"
1011
                                         " spindle ratio"))
1012

    
1013
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1014
                            help="Hypervisor and hypervisor options, in the"
1015
                            " format hypervisor:option=value,option=value,...",
1016
                            default=None, type="identkeyval")
1017

    
1018
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1019
                        help="Hypervisor and hypervisor options, in the"
1020
                        " format hypervisor:option=value,option=value,...",
1021
                        default=[], action="append", type="identkeyval")
1022

    
1023
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1024
                           action="store_false",
1025
                           help="Don't check that the instance's IP"
1026
                           " is alive")
1027

    
1028
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1029
                             default=True, action="store_false",
1030
                             help="Don't check that the instance's name"
1031
                             " is resolvable")
1032

    
1033
NET_OPT = cli_option("--net",
1034
                     help="NIC parameters", default=[],
1035
                     dest="nics", action="append", type="identkeyval")
1036

    
1037
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1038
                      dest="disks", action="append", type="identkeyval")
1039

    
1040
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1041
                         help="Comma-separated list of disks"
1042
                         " indices to act on (e.g. 0,2) (optional,"
1043
                         " defaults to all disks)")
1044

    
1045
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1046
                         help="Enforces a single-disk configuration using the"
1047
                         " given disk size, in MiB unless a suffix is used",
1048
                         default=None, type="unit", metavar="<size>")
1049

    
1050
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1051
                                dest="ignore_consistency",
1052
                                action="store_true", default=False,
1053
                                help="Ignore the consistency of the disks on"
1054
                                " the secondary")
1055

    
1056
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1057
                                dest="allow_failover",
1058
                                action="store_true", default=False,
1059
                                help="If migration is not possible fallback to"
1060
                                     " failover")
1061

    
1062
FORCE_FAILOVER_OPT = cli_option("--force-failover",
1063
                                dest="force_failover",
1064
                                action="store_true", default=False,
1065
                                help="Do not use migration, always use"
1066
                                     " failover")
1067

    
1068
NONLIVE_OPT = cli_option("--non-live", dest="live",
1069
                         default=True, action="store_false",
1070
                         help="Do a non-live migration (this usually means"
1071
                         " freeze the instance, save the state, transfer and"
1072
                         " only then resume running on the secondary node)")
1073

    
1074
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1075
                                default=None,
1076
                                choices=list(constants.HT_MIGRATION_MODES),
1077
                                help="Override default migration mode (choose"
1078
                                " either live or non-live")
1079

    
1080
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1081
                                help="Target node and optional secondary node",
1082
                                metavar="<pnode>[:<snode>]",
1083
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1084

    
1085
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1086
                           action="append", metavar="<node>",
1087
                           help="Use only this node (can be used multiple"
1088
                           " times, if not given defaults to all nodes)",
1089
                           completion_suggest=OPT_COMPL_ONE_NODE)
1090

    
1091
NODEGROUP_OPT_NAME = "--node-group"
1092
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1093
                           dest="nodegroup",
1094
                           help="Node group (name or uuid)",
1095
                           metavar="<nodegroup>",
1096
                           default=None, type="string",
1097
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1098

    
1099
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1100
                             metavar="<node>",
1101
                             completion_suggest=OPT_COMPL_ONE_NODE)
1102

    
1103
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1104
                         action="store_false",
1105
                         help="Don't start the instance after creation")
1106

    
1107
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1108
                         action="store_true", default=False,
1109
                         help="Show command instead of executing it")
1110

    
1111
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1112
                         default=False, action="store_true",
1113
                         help="Instead of performing the migration/failover,"
1114
                         " try to recover from a failed cleanup. This is safe"
1115
                         " to run even if the instance is healthy, but it"
1116
                         " will create extra replication traffic and "
1117
                         " disrupt briefly the replication (like during the"
1118
                         " migration/failover")
1119

    
1120
STATIC_OPT = cli_option("-s", "--static", dest="static",
1121
                        action="store_true", default=False,
1122
                        help="Only show configuration data, not runtime data")
1123

    
1124
ALL_OPT = cli_option("--all", dest="show_all",
1125
                     default=False, action="store_true",
1126
                     help="Show info on all instances on the cluster."
1127
                     " This can take a long time to run, use wisely")
1128

    
1129
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1130
                           action="store_true", default=False,
1131
                           help="Interactive OS reinstall, lists available"
1132
                           " OS templates for selection")
1133

    
1134
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1135
                                 action="store_true", default=False,
1136
                                 help="Remove the instance from the cluster"
1137
                                 " configuration even if there are failures"
1138
                                 " during the removal process")
1139

    
1140
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1141
                                        dest="ignore_remove_failures",
1142
                                        action="store_true", default=False,
1143
                                        help="Remove the instance from the"
1144
                                        " cluster configuration even if there"
1145
                                        " are failures during the removal"
1146
                                        " process")
1147

    
1148
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1149
                                 action="store_true", default=False,
1150
                                 help="Remove the instance from the cluster")
1151

    
1152
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1153
                               help="Specifies the new node for the instance",
1154
                               metavar="NODE", default=None,
1155
                               completion_suggest=OPT_COMPL_ONE_NODE)
1156

    
1157
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1158
                               help="Specifies the new secondary node",
1159
                               metavar="NODE", default=None,
1160
                               completion_suggest=OPT_COMPL_ONE_NODE)
1161

    
1162
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1163
                             help="Specifies the new primary node",
1164
                             metavar="<node>", default=None,
1165
                             completion_suggest=OPT_COMPL_ONE_NODE)
1166

    
1167
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1168
                            default=False, action="store_true",
1169
                            help="Replace the disk(s) on the primary"
1170
                                 " node (applies only to internally mirrored"
1171
                                 " disk templates, e.g. %s)" %
1172
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1173

    
1174
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1175
                              default=False, action="store_true",
1176
                              help="Replace the disk(s) on the secondary"
1177
                                   " node (applies only to internally mirrored"
1178
                                   " disk templates, e.g. %s)" %
1179
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1180

    
1181
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1182
                              default=False, action="store_true",
1183
                              help="Lock all nodes and auto-promote as needed"
1184
                              " to MC status")
1185

    
1186
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1187
                              default=False, action="store_true",
1188
                              help="Automatically replace faulty disks"
1189
                                   " (applies only to internally mirrored"
1190
                                   " disk templates, e.g. %s)" %
1191
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1192

    
1193
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1194
                             default=False, action="store_true",
1195
                             help="Ignore current recorded size"
1196
                             " (useful for forcing activation when"
1197
                             " the recorded size is wrong)")
1198

    
1199
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1200
                          metavar="<node>",
1201
                          completion_suggest=OPT_COMPL_ONE_NODE)
1202

    
1203
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1204
                         metavar="<dir>")
1205

    
1206
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1207
                              help="Specify the secondary ip for the node",
1208
                              metavar="ADDRESS", default=None)
1209

    
1210
READD_OPT = cli_option("--readd", dest="readd",
1211
                       default=False, action="store_true",
1212
                       help="Readd old node after replacing it")
1213

    
1214
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1215
                                default=True, action="store_false",
1216
                                help="Disable SSH key fingerprint checking")
1217

    
1218
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1219
                                 default=False, action="store_true",
1220
                                 help="Force the joining of a node")
1221

    
1222
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1223
                    type="bool", default=None, metavar=_YORNO,
1224
                    help="Set the master_candidate flag on the node")
1225

    
1226
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1227
                         type="bool", default=None,
1228
                         help=("Set the offline flag on the node"
1229
                               " (cluster does not communicate with offline"
1230
                               " nodes)"))
1231

    
1232
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1233
                         type="bool", default=None,
1234
                         help=("Set the drained flag on the node"
1235
                               " (excluded from allocation operations)"))
1236

    
1237
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1238
                              type="bool", default=None, metavar=_YORNO,
1239
                              help="Set the master_capable flag on the node")
1240

    
1241
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1242
                          type="bool", default=None, metavar=_YORNO,
1243
                          help="Set the vm_capable flag on the node")
1244

    
1245
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1246
                             type="bool", default=None, metavar=_YORNO,
1247
                             help="Set the allocatable flag on a volume")
1248

    
1249
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1250
                               help="Disable support for lvm based instances"
1251
                               " (cluster-wide)",
1252
                               action="store_false", default=True)
1253

    
1254
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1255
                            dest="enabled_hypervisors",
1256
                            help="Comma-separated list of hypervisors",
1257
                            type="string", default=None)
1258

    
1259
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1260
                                        dest="enabled_disk_templates",
1261
                                        help="Comma-separated list of "
1262
                                             "disk templates",
1263
                                        type="string", default=None)
1264

    
1265
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1266
                            type="keyval", default={},
1267
                            help="NIC parameters")
1268

    
1269
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1270
                         dest="candidate_pool_size", type="int",
1271
                         help="Set the candidate pool size")
1272

    
1273
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1274
                         help=("Enables LVM and specifies the volume group"
1275
                               " name (cluster-wide) for disk allocation"
1276
                               " [%s]" % constants.DEFAULT_VG),
1277
                         metavar="VG", default=None)
1278

    
1279
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1280
                          help="Destroy cluster", action="store_true")
1281

    
1282
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1283
                          help="Skip node agreement check (dangerous)",
1284
                          action="store_true", default=False)
1285

    
1286
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1287
                            help="Specify the mac prefix for the instance IP"
1288
                            " addresses, in the format XX:XX:XX",
1289
                            metavar="PREFIX",
1290
                            default=None)
1291

    
1292
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1293
                               help="Specify the node interface (cluster-wide)"
1294
                               " on which the master IP address will be added"
1295
                               " (cluster init default: %s)" %
1296
                               constants.DEFAULT_BRIDGE,
1297
                               metavar="NETDEV",
1298
                               default=None)
1299

    
1300
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1301
                                help="Specify the netmask of the master IP",
1302
                                metavar="NETMASK",
1303
                                default=None)
1304

    
1305
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1306
                                     dest="use_external_mip_script",
1307
                                     help="Specify whether to run a"
1308
                                     " user-provided script for the master"
1309
                                     " IP address turnup and"
1310
                                     " turndown operations",
1311
                                     type="bool", metavar=_YORNO, default=None)
1312

    
1313
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1314
                                help="Specify the default directory (cluster-"
1315
                                "wide) for storing the file-based disks [%s]" %
1316
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                          " hotplug is supported")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
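
# Illustrative sketch only (not used by the module itself): with an argument
# definition built from the ArgXxx argument classes defined elsewhere in this
# module, e.g. args_def = [ArgInstance(min=1, max=1)], calling
# _CheckArguments("info", args_def, []) prints
# "Error: Command info expects at least 1 argument(s)" and returns False,
# while passing a single instance name makes it return True.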


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
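
# Example of the accepted forms (illustrative only):
#   SplitNodeOption("node1.example.com:node2.example.com")
#       -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)
#   SplitNodeOption(None) -> (None, None)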


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
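
# Example (illustrative only):
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#       -> ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]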


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
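
# Example (illustrative only), with default fields ["name", "os"]:
#   ParseFields(None, ["name", "os"]) -> ["name", "os"]
#   ParseFields("+beparams", ["name", "os"]) -> ["name", "os", "beparams"]
#   ParseFields("name,admin_state", ["name", "os"]) -> ["name", "admin_state"]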


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
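
# Typical use (illustrative sketch only): AskUser("Continue?") relies on the
# default y/n choices and returns a boolean, while callers needing more
# answers pass their own list, e.g.
#   choices = [("y", True, "Perform the operation"),
#              ("n", False, "Do not perform the operation"),
#              ("v", "view", "View the details first")]
#   answer = AskUser("Apply the changes?", choices)
# Without a controlling tty the last entry's return value ("view" here) is
# returned unchanged.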


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
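
# How the pieces fit together (illustrative sketch): PollJob below wires a
# LUXI-backed data callback and a reporting callback into this loop, roughly
#   cl = GetClient()
#   result = GenericPollJob(job_id, _LuxiJobPollCb(cl), StdioJobPollReportCb())
# Custom callers can substitute their own JobPollCbBase/JobPollReportCbBase
# subclasses to poll another backend or redirect the progress reporting.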


class JobPollCbBase(object):
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase(object):
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if hasattr(opts, "print_jobid") and opts.print_jobid:
    ToStdout("%d" % job_id)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  """
  cl = GetClient()
  job_id = cl.SubmitJobToDrainedQueue([op])
  op_results = PollJob(job_id, cl=cl)
  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
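
# Illustrative sketch of the two paths: when the submit and print-jobid
# options are given, opts carries submit_only=True (and usually
# print_jobid=True), so SubmitOrSend(op, opts) prints the job ID and raises
# JobSubmittedException, which GenericMain/FormatError turn into a zero exit
# code; without them it blocks in SubmitOpCode and returns the opcode result.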


def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
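
# Shape of the resulting trail (illustrative example): running
# "gnt-instance reboot --reason 'kernel update' inst1" produces roughly
#   [(constants.OPCODE_REASON_SRC_USER, "kernel update", <epoch ns>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "reboot",
#     <epoch ns>)]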


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)


def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
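
# Example (illustrative only): "--net 0:mode=bridged --net 2:link=br1" is
# parsed by the option machinery into roughly
#   [("0", {"mode": "bridged"}), ("2", {"link": "br1"})]
# and this function turns that into
#   [{"mode": "bridged"}, {}, {"link": "br1"}]
# i.e. a dense list indexed by NIC number, with untouched NICs left as {}.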


def FixHvParams(hvparams):
  # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  # comma to space because commas cannot be accepted on the command line
  # (they already act as the separator between different hvparams). Still,
  # RAPI should be able to accept commas for backwards compatibility.
  # Therefore, we convert spaces into commas here, and we keep the old
  # parsing logic everywhere else.
  try:
    new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
    hvparams[constants.HV_USB_DEVICES] = new_usb_devices
  except KeyError:
    # No usb_devices, no modification required
    pass
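
# Example (illustrative only, with placeholder device specs):
#   hvparams = {constants.HV_USB_DEVICES: "spec1 spec2"}
#   FixHvParams(hvparams)
#   # hvparams[constants.HV_USB_DEVICES] == "spec1,spec2"
# A dictionary without usb_devices is left unmodified.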


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  FixHvParams(hvparams)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
2791

    
2792

    
2793
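# Illustrative sketch (not part of the original module): after the disk-spec
# parsing above, a "--disk 0:size=10G" option ends up in "disks" roughly as
#
#   disks[0] == {constants.IDISK_SIZE: 10240}   # sizes are kept in mebibytes
#
# while "--disk 0:adopt=<lv>" keeps the adoption source and forces the size
# to 0 until the adopted volume is actually inspected.

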
class _RunWhileClusterStoppedHelper(object):
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


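# Illustrative sketch (not part of the original module): a caller of
# RunWhileClusterStopped passes a callback that receives the helper instance,
# e.g. (callback name hypothetical):
#
#   def _DoOfflineWork(runner):
#     runner.feedback_fn("All daemons are down, doing the work")
#
#   RunWhileClusterStopped(ToStdout, _DoOfflineWork)

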
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


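# Illustrative sketch (not part of the original module): GenerateTable returns
# the formatted lines rather than printing them; field names below are assumed:
#
#   lines = GenerateTable({"name": "Node", "dtotal": "DTotal"},
#                         ["name", "dtotal"], None,
#                         [["node1.example.com", 10240]],
#                         numfields=["dtotal"], unitfields=["dtotal"])
#
# which yields a header line followed by one line per row, with "dtotal"
# rendered via utils.FormatUnit and right-aligned.

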
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


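# Illustrative sketch (not part of the original module): callers can override
# how individual columns are rendered by passing _GetColumnFormatter a
# dictionary shaped like _DEFAULT_FORMAT_QUERY but keyed by field name
# (field name below is assumed), e.g.
#
#   fmtoverride = {"tags": (lambda value: utils.CommaJoin(value), False)}
#
# the override is consulted first, then the per-type defaults above.

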
class _QueryColumnFormatter(object):
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


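# Illustrative sketch (not part of the original module): a typical "gnt-* list"
# command delegates to GenericList roughly like this (field and option
# attribute names assumed):
#
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"],
#                      args, opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose, namefield="name")

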
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn(object):
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


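# Illustrative sketch (not part of the original module): FormatTable usage,
#
#   cols = [TableColumn("Name", str, False),
#           TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True)]
#   lines = FormatTable([["disk0", 10240]], cols, True, None)
#
# returns a list of lines with "Name" left-aligned and "Size" right-aligned;
# with a separator such as "|" the columns are simply joined instead.

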
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


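# Illustrative sketch (not part of the original module):
#
#   ParseTimespec("30")   # -> 30      (plain value, taken as seconds)
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("h")    # raises OpPrereqError (only a suffix was passed)

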
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


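# Illustrative sketch (not part of the original module):
#
#   # All online nodes except the master, returned as secondary IPs:
#   ips = GetOnlineNodes([], secondary_ips=True, filter_master=True)
#
#   # Online nodes among an explicit subset, offline-node note suppressed:
#   names = GetOnlineNodes(["node1.example.com"], nowarn=True)

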
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


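# Illustrative sketch (not part of the original module): ToStdout/ToStderr
# apply "%"-style formatting only when extra arguments are given, e.g.
#
#   ToStdout("Submitted job %s", job_id)   # formatted ("job_id" hypothetical)
#   ToStderr("100% done")                  # written verbatim, no "%" expansion

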
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


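# Illustrative sketch (not part of the original module): JobExecutor is used
# to submit one job per item and then wait for all of them, e.g. (variable
# names assumed):
#
#   jex = JobExecutor(cl=cl, opts=opts)
#   for instance_name in instances:
#     jex.QueueJob(instance_name,
#                  opcodes.OpInstanceStartup(instance_name=instance_name))
#   results = jex.GetResults()   # [(success, result-or-error), ...]

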
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


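# Illustrative sketch (not part of the original module): in
# FormatParamsDictInfo, values set explicitly are shown as-is while inherited
# defaults are marked, e.g.
#
#   FormatParamsDictInfo({"maxmem": 512}, {"maxmem": 512, "minmem": 128})
#   # -> {"maxmem": "512", "minmem": "default (128)"}

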
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


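# Illustrative sketch (not part of the original module): building a partial
# group-level policy from already-parsed option values with
# CreateIPolicyFromOpts:
#
#   ipolicy = CreateIPolicyFromOpts(ipolicy_vcpu_ratio=4.0,
#                                   ipolicy_disk_templates=["plain", "drbd"],
#                                   group_ipolicy=True)
#   # -> partial policy dict with only the vcpu-ratio and disk-template keys set

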
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
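

# Illustrative sketch (not part of the original module):
#
#   PrintGenericInfo([
#     ("name", "instance1.example.com"),
#     ("tags", ["web", "production"]),
#     ])
#
# prints a YAML-style rendering such as:
#
#   name: instance1.example.com
#   tags:
#     - web
#     - production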