
root / lib / cli.py @ 89c10241


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HOTPLUG_IF_POSSIBLE_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"
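
# Note: these prefixes are interpreted by _SplitKeyVal/_SplitIdentKeyVal below;
# a "no_<key>" element parses to <key>=False and a "-<key>" element to
# <key>=None.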

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
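
# Example: with parse_prefixes=True, a value such as
# "memory=512,no_auto_balance,-vcpus" parses to
# {"memory": "512", "auto_balance": False, "vcpus": None}.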


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
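
# Example: "0:size=10G,mode=rw" parses to ("0", {"size": "10G", "mode": "rw"});
# with parse_prefixes=True, "no_link0" parses to ("link0", False).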


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
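
# Example: a value like "min:size=10,count=2/max:size=20//min:size=5" parses to
# [{"min": {"size": "10", "count": "2"}, "max": {"size": "20"}},
#  {"min": {"size": "5"}}] (the key names here are purely illustrative).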


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1069
                                help="Target node and optional secondary node",
1070
                                metavar="<pnode>[:<snode>]",
1071
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1072

    
1073
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1074
                           action="append", metavar="<node>",
1075
                           help="Use only this node (can be used multiple"
1076
                           " times, if not given defaults to all nodes)",
1077
                           completion_suggest=OPT_COMPL_ONE_NODE)
1078

    
1079
NODEGROUP_OPT_NAME = "--node-group"
1080
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1081
                           dest="nodegroup",
1082
                           help="Node group (name or uuid)",
1083
                           metavar="<nodegroup>",
1084
                           default=None, type="string",
1085
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1086

    
1087
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1088
                             metavar="<node>",
1089
                             completion_suggest=OPT_COMPL_ONE_NODE)
1090

    
1091
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1092
                         action="store_false",
1093
                         help="Don't start the instance after creation")
1094

    
1095
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1096
                         action="store_true", default=False,
1097
                         help="Show command instead of executing it")
1098

    
1099
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1100
                         default=False, action="store_true",
1101
                         help="Instead of performing the migration/failover,"
1102
                         " try to recover from a failed cleanup. This is safe"
1103
                         " to run even if the instance is healthy, but it"
1104
                         " will create extra replication traffic and "
1105
                         " disrupt briefly the replication (like during the"
1106
                         " migration/failover")
1107

    
1108
STATIC_OPT = cli_option("-s", "--static", dest="static",
1109
                        action="store_true", default=False,
1110
                        help="Only show configuration data, not runtime data")
1111

    
1112
ALL_OPT = cli_option("--all", dest="show_all",
1113
                     default=False, action="store_true",
1114
                     help="Show info on all instances on the cluster."
1115
                     " This can take a long time to run, use wisely")
1116

    
1117
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1118
                           action="store_true", default=False,
1119
                           help="Interactive OS reinstall, lists available"
1120
                           " OS templates for selection")
1121

    
1122
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1123
                                 action="store_true", default=False,
1124
                                 help="Remove the instance from the cluster"
1125
                                 " configuration even if there are failures"
1126
                                 " during the removal process")
1127

    
1128
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1129
                                        dest="ignore_remove_failures",
1130
                                        action="store_true", default=False,
1131
                                        help="Remove the instance from the"
1132
                                        " cluster configuration even if there"
1133
                                        " are failures during the removal"
1134
                                        " process")
1135

    
1136
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1137
                                 action="store_true", default=False,
1138
                                 help="Remove the instance from the cluster")
1139

    
1140
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1141
                               help="Specifies the new node for the instance",
1142
                               metavar="NODE", default=None,
1143
                               completion_suggest=OPT_COMPL_ONE_NODE)
1144

    
1145
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1146
                               help="Specifies the new secondary node",
1147
                               metavar="NODE", default=None,
1148
                               completion_suggest=OPT_COMPL_ONE_NODE)
1149

    
1150
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1151
                             help="Specifies the new primary node",
1152
                             metavar="<node>", default=None,
1153
                             completion_suggest=OPT_COMPL_ONE_NODE)
1154

    
1155
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1156
                            default=False, action="store_true",
1157
                            help="Replace the disk(s) on the primary"
1158
                                 " node (applies only to internally mirrored"
1159
                                 " disk templates, e.g. %s)" %
1160
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1161

    
1162
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1163
                              default=False, action="store_true",
1164
                              help="Replace the disk(s) on the secondary"
1165
                                   " node (applies only to internally mirrored"
1166
                                   " disk templates, e.g. %s)" %
1167
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1168

    
1169
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1170
                              default=False, action="store_true",
1171
                              help="Lock all nodes and auto-promote as needed"
1172
                              " to MC status")
1173

    
1174
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1175
                              default=False, action="store_true",
1176
                              help="Automatically replace faulty disks"
1177
                                   " (applies only to internally mirrored"
1178
                                   " disk templates, e.g. %s)" %
1179
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1180

    
1181
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1182
                             default=False, action="store_true",
1183
                             help="Ignore current recorded size"
1184
                             " (useful for forcing activation when"
1185
                             " the recorded size is wrong)")
1186

    
1187
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1188
                          metavar="<node>",
1189
                          completion_suggest=OPT_COMPL_ONE_NODE)
1190

    
1191
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1192
                         metavar="<dir>")
1193

    
1194
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1195
                              help="Specify the secondary ip for the node",
1196
                              metavar="ADDRESS", default=None)
1197

    
1198
READD_OPT = cli_option("--readd", dest="readd",
1199
                       default=False, action="store_true",
1200
                       help="Readd old node after replacing it")
1201

    
1202
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1203
                                default=True, action="store_false",
1204
                                help="Disable SSH key fingerprint checking")
1205

    
1206
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1207
                                 default=False, action="store_true",
1208
                                 help="Force the joining of a node")
1209

    
1210
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1211
                    type="bool", default=None, metavar=_YORNO,
1212
                    help="Set the master_candidate flag on the node")
1213

    
1214
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1215
                         type="bool", default=None,
1216
                         help=("Set the offline flag on the node"
1217
                               " (cluster does not communicate with offline"
1218
                               " nodes)"))
1219

    
1220
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1221
                         type="bool", default=None,
1222
                         help=("Set the drained flag on the node"
1223
                               " (excluded from allocation operations)"))
1224

    
1225
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1226
                              type="bool", default=None, metavar=_YORNO,
1227
                              help="Set the master_capable flag on the node")
1228

    
1229
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1230
                          type="bool", default=None, metavar=_YORNO,
1231
                          help="Set the vm_capable flag on the node")
1232

    
1233
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1234
                             type="bool", default=None, metavar=_YORNO,
1235
                             help="Set the allocatable flag on a volume")
1236

    
1237
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1238
                               help="Disable support for lvm based instances"
1239
                               " (cluster-wide)",
1240
                               action="store_false", default=True)
1241

    
1242
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1243
                            dest="enabled_hypervisors",
1244
                            help="Comma-separated list of hypervisors",
1245
                            type="string", default=None)
1246

    
1247
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1248
                                        dest="enabled_disk_templates",
1249
                                        help="Comma-separated list of "
1250
                                             "disk templates",
1251
                                        type="string", default=None)
1252

    
1253
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1254
                            type="keyval", default={},
1255
                            help="NIC parameters")
1256

    
1257
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1258
                         dest="candidate_pool_size", type="int",
1259
                         help="Set the candidate pool size")
1260

    
1261
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1262
                         help=("Enables LVM and specifies the volume group"
1263
                               " name (cluster-wide) for disk allocation"
1264
                               " [%s]" % constants.DEFAULT_VG),
1265
                         metavar="VG", default=None)
1266

    
1267
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1268
                          help="Destroy cluster", action="store_true")
1269

    
1270
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1271
                          help="Skip node agreement check (dangerous)",
1272
                          action="store_true", default=False)
1273

    
1274
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1275
                            help="Specify the mac prefix for the instance IP"
1276
                            " addresses, in the format XX:XX:XX",
1277
                            metavar="PREFIX",
1278
                            default=None)
1279

    
1280
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1281
                               help="Specify the node interface (cluster-wide)"
1282
                               " on which the master IP address will be added"
1283
                               " (cluster init default: %s)" %
1284
                               constants.DEFAULT_BRIDGE,
1285
                               metavar="NETDEV",
1286
                               default=None)
1287

    
1288
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1289
                                help="Specify the netmask of the master IP",
1290
                                metavar="NETMASK",
1291
                                default=None)
1292

    
1293
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1294
                                     dest="use_external_mip_script",
1295
                                     help="Specify whether to run a"
1296
                                     " user-provided script for the master"
1297
                                     " IP address turnup and"
1298
                                     " turndown operations",
1299
                                     type="bool", metavar=_YORNO, default=None)
1300

    
1301
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1302
                                help="Specify the default directory (cluster-"
1303
                                "wide) for storing the file-based disks [%s]" %
1304
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1305
                                metavar="DIR",
1306
                                default=None)
1307

    
1308
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1309
  "--shared-file-storage-dir",
1310
  dest="shared_file_storage_dir",
1311
  help="Specify the default directory (cluster-wide) for storing the"
1312
  " shared file-based disks [%s]" %
1313
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1314
  metavar="SHAREDDIR", default=None)
1315

    
1316
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1317
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1318
                                   action="store_false", default=True)
1319

    
1320
MODIFY_ETCHOSTS_OPT = \
1321
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1322
            default=None, type="bool",
1323
            help="Defines whether the cluster should autonomously modify"
1324
            " and keep in sync the /etc/hosts file of the nodes")
1325

    
1326
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1327
                                    help="Don't initialize SSH keys",
1328
                                    action="store_false", default=True)
1329

    
1330
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1331
                             help="Enable parseable error messages",
1332
                             action="store_true", default=False)
1333

    
1334
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1335
                          help="Skip N+1 memory redundancy tests",
1336
                          action="store_true", default=False)
1337

    
1338
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1339
                             help="Type of reboot: soft/hard/full",
1340
                             default=constants.INSTANCE_REBOOT_HARD,
1341
                             metavar="<REBOOT>",
1342
                             choices=list(constants.REBOOT_TYPES))
1343

    
1344
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1345
                                    dest="ignore_secondaries",
1346
                                    default=False, action="store_true",
1347
                                    help="Ignore errors from secondaries")
1348

    
1349
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1350
                            action="store_false", default=True,
1351
                            help="Don't shutdown the instance (unsafe)")
1352

    
1353
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1354
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1355
                         help="Maximum time to wait")
1356

    
1357
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1358
                                  dest="shutdown_timeout", type="int",
1359
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1360
                                  help="Maximum time to wait for instance"
1361
                                  " shutdown")
1362

    
1363
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1364
                          default=None,
1365
                          help=("Number of seconds between repetions of the"
1366
                                " command"))
1367

    
1368
EARLY_RELEASE_OPT = cli_option("--early-release",
1369
                               dest="early_release", default=False,
1370
                               action="store_true",
1371
                               help="Release the locks on the secondary"
1372
                               " node(s) early")
1373

    
1374
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1375
                                  dest="new_cluster_cert",
1376
                                  default=False, action="store_true",
1377
                                  help="Generate a new cluster certificate")
1378

    
1379
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1380
                           default=None,
1381
                           help="File containing new RAPI certificate")
1382

    
1383
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1384
                               default=None, action="store_true",
1385
                               help=("Generate a new self-signed RAPI"
1386
                                     " certificate"))
1387

    
1388
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1389
                            default=None,
1390
                            help="File containing new SPICE certificate")
1391

    
1392
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1393
                              default=None,
1394
                              help="File containing the certificate of the CA"
1395
                              " which signed the SPICE certificate")
1396

    
1397
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1398
                                dest="new_spice_cert", default=None,
1399
                                action="store_true",
1400
                                help=("Generate a new self-signed SPICE"
1401
                                      " certificate"))
1402

    
1403
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1404
                                    dest="new_confd_hmac_key",
1405
                                    default=False, action="store_true",
1406
                                    help=("Create a new HMAC key for %s" %
1407
                                          constants.CONFD))
1408

    
1409
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1414

    
1415
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1416
                                           dest="new_cluster_domain_secret",
1417
                                           default=False, action="store_true",
1418
                                           help=("Create a new cluster domain"
1419
                                                 " secret"))
1420

    
1421
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1422
                              dest="use_replication_network",
1423
                              help="Whether to use the replication network"
1424
                              " for talking to the nodes",
1425
                              action="store_true", default=False)
1426

    
1427
MAINTAIN_NODE_HEALTH_OPT = \
1428
    cli_option("--maintain-node-health", dest="maintain_node_health",
1429
               metavar=_YORNO, default=None, type="bool",
1430
               help="Configure the cluster to automatically maintain node"
1431
               " health, by shutting down unknown instances, shutting down"
1432
               " unknown DRBD devices, etc.")
1433

    
1434
IDENTIFY_DEFAULTS_OPT = \
1435
    cli_option("--identify-defaults", dest="identify_defaults",
1436
               default=False, action="store_true",
1437
               help="Identify which saved instance parameters are equal to"
1438
               " the current cluster defaults and set them as such, instead"
1439
               " of marking them as overridden")
1440

    
1441
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1442
                         action="store", dest="uid_pool",
1443
                         help=("A list of user-ids or user-id"
1444
                               " ranges separated by commas"))
1445

    
1446
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1447
                          action="store", dest="add_uids",
1448
                          help=("A list of user-ids or user-id"
1449
                                " ranges separated by commas, to be"
1450
                                " added to the user-id pool"))
1451

    
1452
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1453
                             action="store", dest="remove_uids",
1454
                             help=("A list of user-ids or user-id"
1455
                                   " ranges separated by commas, to be"
1456
                                   " removed from the user-id pool"))
1457

    
1458
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))
1463

    
1464
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numerals for positive integers")
1468

    
1469
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1470
                             action="store", default=None,
1471
                             help="Specifies usermode helper for DRBD")
1472

    
1473
PRIMARY_IP_VERSION_OPT = \
1474
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1475
               action="store", dest="primary_ip_version",
1476
               metavar="%d|%d" % (constants.IP4_VERSION,
1477
                                  constants.IP6_VERSION),
1478
               help="Cluster-wide IP version for primary IP")
1479

    
1480
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1481
                              action="store_true",
1482
                              help="Show machine name for every line in output")
1483

    
1484
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1485
                              action="store_true",
1486
                              help=("Hide successful results and show failures"
1487
                                    " only (determined by the exit code)"))
1488

    
1489
REASON_OPT = cli_option("--reason", default=None,
1490
                        help="The reason for executing the command")
1491

    
1492

    
1493
def _PriorityOptionCb(option, _, value, parser):
1494
  """Callback for processing C{--priority} option.
1495

1496
  """
1497
  value = _PRIONAME_TO_VALUE[value]
1498

    
1499
  setattr(parser.values, option.dest, value)
1500

    
1501

    
1502
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1503
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1504
                          choices=_PRIONAME_TO_VALUE.keys(),
1505
                          action="callback", type="choice",
1506
                          callback=_PriorityOptionCb,
1507
                          help="Priority for opcode processing")
1508

    
1509
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1510
                        type="bool", default=None, metavar=_YORNO,
1511
                        help="Sets the hidden flag on the OS")
1512

    
1513
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1514
                        type="bool", default=None, metavar=_YORNO,
1515
                        help="Sets the blacklisted flag on the OS")
1516

    
1517
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1518
                                     type="bool", metavar=_YORNO,
1519
                                     dest="prealloc_wipe_disks",
1520
                                     help=("Wipe disks prior to instance"
1521
                                           " creation"))
1522

    
1523
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1524
                             type="keyval", default=None,
1525
                             help="Node parameters")
1526

    
1527
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1528
                              action="store", metavar="POLICY", default=None,
1529
                              help="Allocation policy for the node group")
1530

    
1531
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1532
                              type="bool", metavar=_YORNO,
1533
                              dest="node_powered",
1534
                              help="Specify if the SoR for node is powered")
1535

    
1536
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1537
                             default=constants.OOB_TIMEOUT,
1538
                             help="Maximum time to wait for out-of-band helper")
1539

    
1540
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1541
                             default=constants.OOB_POWER_DELAY,
1542
                             help="Time in seconds to wait between power-ons")
1543

    
1544
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1545
                              action="store_true", default=False,
1546
                              help=("Whether command argument should be treated"
1547
                                    " as filter"))
1548

    
1549
NO_REMEMBER_OPT = cli_option("--no-remember",
1550
                             dest="no_remember",
1551
                             action="store_true", default=False,
1552
                             help="Perform but do not record the change"
1553
                             " in the configuration")
1554

    
1555
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1556
                              default=False, action="store_true",
1557
                              help="Evacuate primary instances only")
1558

    
1559
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1560
                                default=False, action="store_true",
1561
                                help="Evacuate secondary instances only"
1562
                                     " (applies only to internally mirrored"
1563
                                     " disk templates, e.g. %s)" %
1564
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1565

    
1566
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1567
                                action="store_true", default=False,
1568
                                help="Pause instance at startup")
1569

    
1570
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1571
                          help="Destination node group (name or uuid)",
1572
                          default=None, action="append",
1573
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1574

    
1575
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1576
                               action="append", dest="ignore_errors",
1577
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1578
                               help="Error code to be ignored")
1579

    
1580
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1581
                            action="append",
1582
                            help=("Specify disk state information in the"
1583
                                  " format"
1584
                                  " storage_type/identifier:option=value,...;"
1585
                                  " note this is unused for now"),
1586
                            type="identkeyval")
1587

    
1588
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1589
                          action="append",
1590
                          help=("Specify hypervisor state information in the"
1591
                                " format hypervisor:option=value,...;"
1592
                                " note this is unused for now"),
1593
                          type="identkeyval")
1594

    
1595
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1596
                                action="store_true", default=False,
1597
                                help="Ignore instance policy violations")
1598

    
1599
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1600
                             help="Sets the instance's runtime memory,"
1601
                             " ballooning it up or down to the new value",
1602
                             default=None, type="unit", metavar="<size>")
1603

    
1604
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1605
                          action="store_true", default=False,
1606
                          help="Marks the grow as absolute instead of the"
1607
                          " (default) relative mode")
1608

    
1609
NETWORK_OPT = cli_option("--network",
1610
                         action="store", default=None, dest="network",
1611
                         help="IP network in CIDR notation")
1612

    
1613
GATEWAY_OPT = cli_option("--gateway",
1614
                         action="store", default=None, dest="gateway",
1615
                         help="IP address of the router (gateway)")
1616

    
1617
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1618
                                  action="store", default=None,
1619
                                  dest="add_reserved_ips",
1620
                                  help="Comma-separated list of"
1621
                                  " reserved IPs to add")
1622

    
1623
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1624
                                     action="store", default=None,
1625
                                     dest="remove_reserved_ips",
1626
                                     help="Comma-delimited list of"
1627
                                     " reserved IPs to remove")
1628

    
1629
NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IPv6 network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IPv6 address of the router (gateway)")
1636

    
1637
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1638
                                  dest="conflicts_check",
1639
                                  default=True,
1640
                                  action="store_false",
1641
                                  help="Don't check for conflicting IPs")
1642

    
1643
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1644
                                 default=False, action="store_true",
1645
                                 help="Include default values")
1646

    
1647
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1648
                         action="store_true", default=False,
1649
                         help="Hotplug supported devices (NICs and Disks)")
1650

    
1651
HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
1652
                                     dest="hotplug_if_possible",
1653
                                     action="store_true", default=False,
1654
                                     help="Hotplug devices in case"
1655
                                          " hotplug is supported")
1656

    
1657
#: Options provided by all commands
1658
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1659

    
1660
# options related to asynchronous job handling
1661

    
1662
SUBMIT_OPTS = [
1663
  SUBMIT_OPT,
1664
  PRINT_JOBID_OPT,
1665
  ]
1666

    
1667
# Common options for creating instances. "add" and "import" then add their
# own specific ones.
1669
COMMON_CREATE_OPTS = [
1670
  BACKEND_OPT,
1671
  DISK_OPT,
1672
  DISK_TEMPLATE_OPT,
1673
  FILESTORE_DIR_OPT,
1674
  FILESTORE_DRIVER_OPT,
1675
  HYPERVISOR_OPT,
1676
  IALLOCATOR_OPT,
1677
  NET_OPT,
1678
  NODE_PLACEMENT_OPT,
1679
  NOIPCHECK_OPT,
1680
  NOCONFLICTSCHECK_OPT,
1681
  NONAMECHECK_OPT,
1682
  NONICS_OPT,
1683
  NWSYNC_OPT,
1684
  OSPARAMS_OPT,
1685
  OS_SIZE_OPT,
1686
  SUBMIT_OPT,
1687
  PRINT_JOBID_OPT,
1688
  TAG_ADD_OPT,
1689
  DRY_RUN_OPT,
1690
  PRIORITY_OPT,
1691
  ]
1692

    
1693
# common instance policy options
1694
INSTANCE_POLICY_OPTS = [
1695
  IPOLICY_BOUNDS_SPECS_OPT,
1696
  IPOLICY_DISK_TEMPLATES,
1697
  IPOLICY_VCPU_RATIO,
1698
  IPOLICY_SPINDLE_RATIO,
1699
  ]
1700

    
1701
# instance policy split specs options
1702
SPLIT_ISPECS_OPTS = [
1703
  SPECS_CPU_COUNT_OPT,
1704
  SPECS_DISK_COUNT_OPT,
1705
  SPECS_DISK_SIZE_OPT,
1706
  SPECS_MEM_SIZE_OPT,
1707
  SPECS_NIC_COUNT_OPT,
1708
  ]
1709

    
1710

    
1711
class _ShowUsage(Exception):
1712
  """Exception class for L{_ParseArgs}.
1713

1714
  """
1715
  def __init__(self, exit_error):
1716
    """Initializes instances of this class.
1717

1718
    @type exit_error: bool
1719
    @param exit_error: Whether to report failure on exit
1720

1721
    """
1722
    Exception.__init__(self)
1723
    self.exit_error = exit_error
1724

    
1725

    
1726
class _ShowVersion(Exception):
1727
  """Exception class for L{_ParseArgs}.
1728

1729
  """
1730

    
1731

    
1732
def _ParseArgs(binary, argv, commands, aliases, env_override):
1733
  """Parser for the command line arguments.
1734

1735
  This function parses the arguments and returns the function which
1736
  must be executed together with its (modified) arguments.
1737

1738
  @param binary: Script name
1739
  @param argv: Command line arguments
1740
  @param commands: Dictionary containing command definitions
1741
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1742
  @param env_override: list of env variables allowed for default args
1743
  @raise _ShowUsage: If usage description should be shown
1744
  @raise _ShowVersion: If version should be shown
1745

1746
  """
1747
  assert not (env_override - set(commands))
1748
  assert not (set(aliases.keys()) & set(commands.keys()))
1749

    
1750
  if len(argv) > 1:
1751
    cmd = argv[1]
1752
  else:
1753
    # No option or command given
1754
    raise _ShowUsage(exit_error=True)
1755

    
1756
  if cmd == "--version":
1757
    raise _ShowVersion()
1758
  elif cmd == "--help":
1759
    raise _ShowUsage(exit_error=False)
1760
  elif not (cmd in commands or cmd in aliases):
1761
    raise _ShowUsage(exit_error=True)
1762

    
1763
  # get command, unalias it, and look it up in commands
1764
  if cmd in aliases:
1765
    if aliases[cmd] not in commands:
1766
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1767
                                   " command '%s'" % (cmd, aliases[cmd]))
1768

    
1769
    cmd = aliases[cmd]
1770

    
1771
  if cmd in env_override:
1772
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1773
    env_args = os.environ.get(args_env_name)
1774
    if env_args:
1775
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1776

    
1777
  func, args_def, parser_opts, usage, description = commands[cmd]
1778
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1779
                        description=description,
1780
                        formatter=TitledHelpFormatter(),
1781
                        usage="%%prog %s %s" % (cmd, usage))
1782
  parser.disable_interspersed_args()
1783
  options, args = parser.parse_args(args=argv[2:])
1784

    
1785
  if not _CheckArguments(cmd, args_def, args):
1786
    return None, None, None
1787

    
1788
  return func, options, args
1789

    
1790

    
1791
def _FormatUsage(binary, commands):
1792
  """Generates a nice description of all commands.
1793

1794
  @param binary: Script name
1795
  @param commands: Dictionary containing command definitions
1796

1797
  """
1798
  # compute the maximum command name length, capped at 60 characters
  mlen = min(60, max(map(len, commands)))
1800

    
1801
  yield "Usage: %s {command} [options...] [argument...]" % binary
1802
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1803
  yield ""
1804
  yield "Commands:"
1805

    
1806
  # and format a nice command list
1807
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1808
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1809
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1810
    for line in help_lines:
1811
      yield " %-*s   %s" % (mlen, "", line)
1812

    
1813
  yield ""
1814

    
1815

    
1816
def _CheckArguments(cmd, args_def, args):
1817
  """Verifies the arguments using the argument definition.
1818

1819
  Algorithm:
1820

1821
    1. Abort with error if values specified by user but none expected.
1822

1823
    1. For each argument in definition
1824

1825
      1. Keep running count of minimum number of values (min_count)
1826
      1. Keep running count of maximum number of values (max_count)
1827
      1. If it has an unlimited number of values
1828

1829
        1. Abort with error if it's not the last argument in the definition
1830

1831
    1. If last argument has limited number of values
1832

1833
      1. Abort with error if number of values doesn't match or is too large
1834

1835
    1. Abort with error if user didn't pass enough values (min_count)
1836

1837
  """
1838
  if args and not args_def:
1839
    ToStderr("Error: Command %s expects no arguments", cmd)
1840
    return False
1841

    
1842
  min_count = None
1843
  max_count = None
1844
  check_max = None
1845

    
1846
  last_idx = len(args_def) - 1
1847

    
1848
  for idx, arg in enumerate(args_def):
1849
    if min_count is None:
1850
      min_count = arg.min
1851
    elif arg.min is not None:
1852
      min_count += arg.min
1853

    
1854
    if max_count is None:
1855
      max_count = arg.max
1856
    elif arg.max is not None:
1857
      max_count += arg.max
1858

    
1859
    if idx == last_idx:
1860
      check_max = (arg.max is not None)
1861

    
1862
    elif arg.max is None:
1863
      raise errors.ProgrammerError("Only the last argument can have max=None")
1864

    
1865
  if check_max:
1866
    # Command with exact number of arguments
1867
    if (min_count is not None and max_count is not None and
1868
        min_count == max_count and len(args) != min_count):
1869
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1870
      return False
1871

    
1872
    # Command with limited number of arguments
1873
    if max_count is not None and len(args) > max_count:
1874
      ToStderr("Error: Command %s expects only %d argument(s)",
1875
               cmd, max_count)
1876
      return False
1877

    
1878
  # Command with some required arguments
1879
  if min_count is not None and len(args) < min_count:
1880
    ToStderr("Error: Command %s expects at least %d argument(s)",
1881
             cmd, min_count)
1882
    return False
1883

    
1884
  return True
1885

    
1886

    
1887
def SplitNodeOption(value):
1888
  """Splits the value of a --node option.
1889

1890
  """
1891
  if value and ":" in value:
1892
    return value.split(":", 1)
1893
  else:
1894
    return (value, None)
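
# Example (illustrative, node names made up): SplitNodeOption handles the two
# accepted forms of the "--node" value:
#   SplitNodeOption("node1.example.com:node2.example.com")
#     => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     => ("node1.example.com", None)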
1895

    
1896

    
1897
def CalculateOSNames(os_name, os_variants):
1898
  """Calculates all the names an OS can be called, according to its variants.
1899

1900
  @type os_name: string
1901
  @param os_name: base name of the os
1902
  @type os_variants: list or None
1903
  @param os_variants: list of supported variants
1904
  @rtype: list
1905
  @return: list of valid names
1906

1907
  """
1908
  if os_variants:
1909
    return ["%s+%s" % (os_name, v) for v in os_variants]
1910
  else:
1911
    return [os_name]
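
# Example (illustrative, OS name and variants made up):
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#     => ["debootstrap+default", "debootstrap+minimal"]
#   CalculateOSNames("debootstrap", None)
#     => ["debootstrap"]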
1912

    
1913

    
1914
def ParseFields(selected, default):
1915
  """Parses the values of "--field"-like options.
1916

1917
  @type selected: string or None
1918
  @param selected: User-selected options
1919
  @type default: list
1920
  @param default: Default fields
1921

1922
  """
1923
  if selected is None:
1924
    return default
1925

    
1926
  if selected.startswith("+"):
1927
    return default + selected[1:].split(",")
1928

    
1929
  return selected.split(",")
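
# Example (illustrative, field names made up):
#   ParseFields(None, ["name", "status"])        => ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"]) => ["name", "status", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])   => ["name", "os"]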
1930

    
1931

    
1932
UsesRPC = rpc.RunWithRPC
1933

    
1934

    
1935
def AskUser(text, choices=None):
1936
  """Ask the user a question.
1937

1938
  @param text: the question to ask
1939

1940
  @param choices: list with elements tuples (input_char, return_value,
1941
      description); if not given, it will default to: [('y', True,
1942
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1943
      note that the '?' char is reserved for help
1944

1945
  @return: one of the return values from the choices list; if input is
1946
      not possible (i.e. not running with a tty, we return the last
1947
      entry from the list
1948

1949
  """
1950
  if choices is None:
1951
    choices = [("y", True, "Perform the operation"),
1952
               ("n", False, "Do not perform the operation")]
1953
  if not choices or not isinstance(choices, list):
1954
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1955
  for entry in choices:
1956
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1957
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1958

    
1959
  answer = choices[-1][1]
1960
  new_text = []
1961
  for line in text.splitlines():
1962
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1963
  text = "\n".join(new_text)
1964
  try:
1965
    f = file("/dev/tty", "a+")
1966
  except IOError:
1967
    return answer
1968
  try:
1969
    chars = [entry[0] for entry in choices]
1970
    chars[-1] = "[%s]" % chars[-1]
1971
    chars.append("?")
1972
    maps = dict([(entry[0], entry[1]) for entry in choices])
1973
    while True:
1974
      f.write(text)
1975
      f.write("\n")
1976
      f.write("/".join(chars))
1977
      f.write(": ")
1978
      line = f.readline(2).strip().lower()
1979
      if line in maps:
1980
        answer = maps[line]
1981
        break
1982
      elif line == "?":
1983
        for entry in choices:
1984
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1985
        f.write("\n")
1986
        continue
1987
  finally:
1988
    f.close()
1989
  return answer
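
# Example (illustrative prompt): with the default choices, pressing "y"
# returns True and "n" returns False; without a tty the last choice (False)
# is returned.
#   if not AskUser("Destroy instance and all its disks?"):
#     return constants.EXIT_FAILURE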
1990

    
1991

    
1992
class JobSubmittedException(Exception):
1993
  """Job was submitted, client should exit.
1994

1995
  This exception has one argument, the ID of the job that was
1996
  submitted. The handler should print this ID.
1997

1998
  This is not an error, just a structured way to exit from clients.
1999

2000
  """
2001

    
2002

    
2003
def SendJob(ops, cl=None):
2004
  """Function to submit an opcode without waiting for the results.
2005

2006
  @type ops: list
2007
  @param ops: list of opcodes
2008
  @type cl: luxi.Client
2009
  @param cl: the luxi client to use for communicating with the master;
2010
             if None, a new client will be created
2011

2012
  """
2013
  if cl is None:
2014
    cl = GetClient()
2015

    
2016
  job_id = cl.SubmitJob(ops)
2017

    
2018
  return job_id
2019

    
2020

    
2021
def GenericPollJob(job_id, cbs, report_cbs):
2022
  """Generic job-polling function.
2023

2024
  @type job_id: number
2025
  @param job_id: Job ID
2026
  @type cbs: Instance of L{JobPollCbBase}
2027
  @param cbs: Data callbacks
2028
  @type report_cbs: Instance of L{JobPollReportCbBase}
2029
  @param report_cbs: Reporting callbacks
2030

2031
  """
2032
  prev_job_info = None
2033
  prev_logmsg_serial = None
2034

    
2035
  status = None
2036

    
2037
  while True:
2038
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2039
                                      prev_logmsg_serial)
2040
    if not result:
2041
      # job not found, go away!
2042
      raise errors.JobLost("Job with id %s lost" % job_id)
2043

    
2044
    if result == constants.JOB_NOTCHANGED:
2045
      report_cbs.ReportNotChanged(job_id, status)
2046

    
2047
      # Wait again
2048
      continue
2049

    
2050
    # Split result, a tuple of (field values, log entries)
2051
    (job_info, log_entries) = result
2052
    (status, ) = job_info
2053

    
2054
    if log_entries:
2055
      for log_entry in log_entries:
2056
        (serial, timestamp, log_type, message) = log_entry
2057
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2058
                                    log_type, message)
2059
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2060

    
2061
    # TODO: Handle canceled and archived jobs
2062
    elif status in (constants.JOB_STATUS_SUCCESS,
2063
                    constants.JOB_STATUS_ERROR,
2064
                    constants.JOB_STATUS_CANCELING,
2065
                    constants.JOB_STATUS_CANCELED):
2066
      break
2067

    
2068
    prev_job_info = job_info
2069

    
2070
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2071
  if not jobs:
2072
    raise errors.JobLost("Job with id %s lost" % job_id)
2073

    
2074
  status, opstatus, result = jobs[0]
2075

    
2076
  if status == constants.JOB_STATUS_SUCCESS:
2077
    return result
2078

    
2079
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2080
    raise errors.OpExecError("Job was canceled")
2081

    
2082
  has_ok = False
2083
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2084
    if status == constants.OP_STATUS_SUCCESS:
2085
      has_ok = True
2086
    elif status == constants.OP_STATUS_ERROR:
2087
      errors.MaybeRaise(msg)
2088

    
2089
      if has_ok:
2090
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2091
                                 (idx, msg))
2092

    
2093
      raise errors.OpExecError(str(msg))
2094

    
2095
  # default failure mode
2096
  raise errors.OpExecError(result)
2097

    
2098

    
2099
class JobPollCbBase:
2100
  """Base class for L{GenericPollJob} callbacks.
2101

2102
  """
2103
  def __init__(self):
2104
    """Initializes this class.
2105

2106
    """
2107

    
2108
  def WaitForJobChangeOnce(self, job_id, fields,
2109
                           prev_job_info, prev_log_serial):
2110
    """Waits for changes on a job.
2111

2112
    """
2113
    raise NotImplementedError()
2114

    
2115
  def QueryJobs(self, job_ids, fields):
2116
    """Returns the selected fields for the selected job IDs.
2117

2118
    @type job_ids: list of numbers
2119
    @param job_ids: Job IDs
2120
    @type fields: list of strings
2121
    @param fields: Fields
2122

2123
    """
2124
    raise NotImplementedError()
2125

    
2126

    
2127
class JobPollReportCbBase:
2128
  """Base class for L{GenericPollJob} reporting callbacks.
2129

2130
  """
2131
  def __init__(self):
2132
    """Initializes this class.
2133

2134
    """
2135

    
2136
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2137
    """Handles a log message.
2138

2139
    """
2140
    raise NotImplementedError()
2141

    
2142
  def ReportNotChanged(self, job_id, status):
2143
    """Called for if a job hasn't changed in a while.
2144

2145
    @type job_id: number
2146
    @param job_id: Job ID
2147
    @type status: string or None
2148
    @param status: Job status if available
2149

2150
    """
2151
    raise NotImplementedError()
2152

    
2153

    
2154
class _LuxiJobPollCb(JobPollCbBase):
2155
  def __init__(self, cl):
2156
    """Initializes this class.
2157

2158
    """
2159
    JobPollCbBase.__init__(self)
2160
    self.cl = cl
2161

    
2162
  def WaitForJobChangeOnce(self, job_id, fields,
2163
                           prev_job_info, prev_log_serial):
2164
    """Waits for changes on a job.
2165

2166
    """
2167
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2168
                                        prev_job_info, prev_log_serial)
2169

    
2170
  def QueryJobs(self, job_ids, fields):
2171
    """Returns the selected fields for the selected job IDs.
2172

2173
    """
2174
    return self.cl.QueryJobs(job_ids, fields)
2175

    
2176

    
2177
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2178
  def __init__(self, feedback_fn):
2179
    """Initializes this class.
2180

2181
    """
2182
    JobPollReportCbBase.__init__(self)
2183

    
2184
    self.feedback_fn = feedback_fn
2185

    
2186
    assert callable(feedback_fn)
2187

    
2188
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2189
    """Handles a log message.
2190

2191
    """
2192
    self.feedback_fn((timestamp, log_type, log_msg))
2193

    
2194
  def ReportNotChanged(self, job_id, status):
2195
    """Called if a job hasn't changed in a while.
2196

2197
    """
2198
    # Ignore
2199

    
2200

    
2201
class StdioJobPollReportCb(JobPollReportCbBase):
2202
  def __init__(self):
2203
    """Initializes this class.
2204

2205
    """
2206
    JobPollReportCbBase.__init__(self)
2207

    
2208
    self.notified_queued = False
2209
    self.notified_waitlock = False
2210

    
2211
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2212
    """Handles a log message.
2213

2214
    """
2215
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2216
             FormatLogMessage(log_type, log_msg))
2217

    
2218
  def ReportNotChanged(self, job_id, status):
2219
    """Called if a job hasn't changed in a while.
2220

2221
    """
2222
    if status is None:
2223
      return
2224

    
2225
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2226
      ToStderr("Job %s is waiting in queue", job_id)
2227
      self.notified_queued = True
2228

    
2229
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2230
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2231
      self.notified_waitlock = True
2232

    
2233

    
2234
def FormatLogMessage(log_type, log_msg):
2235
  """Formats a job message according to its type.
2236

2237
  """
2238
  if log_type != constants.ELOG_MESSAGE:
2239
    log_msg = str(log_msg)
2240

    
2241
  return utils.SafeEncode(log_msg)
2242

    
2243

    
2244
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2245
  """Function to poll for the result of a job.
2246

2247
  @type job_id: job identifier
2248
  @param job_id: the job to poll for results
2249
  @type cl: luxi.Client
2250
  @param cl: the luxi client to use for communicating with the master;
2251
             if None, a new client will be created
2252

2253
  """
2254
  if cl is None:
2255
    cl = GetClient()
2256

    
2257
  if reporter is None:
2258
    if feedback_fn:
2259
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2260
    else:
2261
      reporter = StdioJobPollReportCb()
2262
  elif feedback_fn:
2263
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2264

    
2265
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
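
# Example (illustrative, "op" stands for a hypothetical opcode built
# elsewhere):
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)  # blocks until the job finishes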
2266

    
2267

    
2268
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2269
  """Legacy function to submit an opcode.
2270

2271
  This is just a simple wrapper over the construction of the processor
2272
  instance. It should be extended to better handle feedback and
2273
  interaction functions.
2274

2275
  """
2276
  if cl is None:
2277
    cl = GetClient()
2278

    
2279
  SetGenericOpcodeOpts([op], opts)
2280

    
2281
  job_id = SendJob([op], cl=cl)
2282
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2283
    ToStdout("%d" % job_id)
2284

    
2285
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2286
                       reporter=reporter)
2287

    
2288
  return op_results[0]
2289

    
2290

    
2291
def SubmitOpCodeToDrainedQueue(op):
2292
  """Forcefully insert a job in the queue, even if it is drained.
2293

2294
  """
2295
  cl = GetClient()
2296
  job_id = cl.SubmitJobToDrainedQueue([op])
2297
  op_results = PollJob(job_id, cl=cl)
2298
  return op_results[0]
2299

    
2300

    
2301
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2302
  """Wrapper around SubmitOpCode or SendJob.
2303

2304
  This function will decide, based on the 'opts' parameter, whether to
2305
  submit and wait for the result of the opcode (and return it), or
2306
  whether to just send the job and print its identifier. It is used in
2307
  order to simplify the implementation of the '--submit' option.
2308

2309
  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).
2311

2312
  """
2313
  if opts and opts.submit_only:
2314
    job = [op]
2315
    SetGenericOpcodeOpts(job, opts)
2316
    job_id = SendJob(job, cl=cl)
2317
    if opts.print_jobid:
2318
      ToStdout("%d" % job_id)
2319
    raise JobSubmittedException(job_id)
2320
  else:
2321
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2322

    
2323

    
2324
def _InitReasonTrail(op, opts):
2325
  """Builds the first part of the reason trail
2326

2327
  Builds the initial part of the reason trail, adding the user provided reason
2328
  (if it exists) and the name of the command starting the operation.
2329

2330
  @param op: the opcode the reason trail will be added to
2331
  @param opts: the command line options selected by the user
2332

2333
  """
2334
  assert len(sys.argv) >= 2
2335
  trail = []
2336

    
2337
  if opts.reason:
2338
    trail.append((constants.OPCODE_REASON_SRC_USER,
2339
                  opts.reason,
2340
                  utils.EpochNano()))
2341

    
2342
  binary = os.path.basename(sys.argv[0])
2343
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2344
  command = sys.argv[1]
2345
  trail.append((source, command, utils.EpochNano()))
2346
  op.reason = trail
2347

    
2348

    
2349
def SetGenericOpcodeOpts(opcode_list, options):
2350
  """Processor for generic options.
2351

2352
  This function updates the given opcodes based on generic command
2353
  line options (like debug, dry-run, etc.).
2354

2355
  @param opcode_list: list of opcodes
2356
  @param options: command line options or None
2357
  @return: None (in-place modification)
2358

2359
  """
2360
  if not options:
2361
    return
2362
  for op in opcode_list:
2363
    op.debug_level = options.debug
2364
    if hasattr(options, "dry_run"):
2365
      op.dry_run = options.dry_run
2366
    if getattr(options, "priority", None) is not None:
2367
      op.priority = options.priority
2368
    _InitReasonTrail(op, options)
2369

    
2370

    
2371
def GetClient(query=False):
2372
  """Connects to the a luxi socket and returns a client.
2373

2374
  @type query: boolean
2375
  @param query: this signifies that the client will only be
2376
      used for queries; if the build-time parameter
2377
      enable-split-queries is enabled, then the client will be
2378
      connected to the query socket instead of the masterd socket
2379

2380
  """
2381
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2382
  if override_socket:
2383
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2384
      address = pathutils.MASTER_SOCKET
2385
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2386
      address = pathutils.QUERY_SOCKET
2387
    else:
2388
      address = override_socket
2389
  elif query and constants.ENABLE_SPLIT_QUERY:
2390
    address = pathutils.QUERY_SOCKET
2391
  else:
2392
    address = None
2393
  # TODO: Cache object?
2394
  try:
2395
    client = luxi.Client(address=address)
2396
  except luxi.NoMasterError:
2397
    ss = ssconf.SimpleStore()
2398

    
2399
    # Try to read ssconf file
2400
    try:
2401
      ss.GetMasterNode()
2402
    except errors.ConfigurationError:
2403
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2404
                                 " not part of a cluster",
2405
                                 errors.ECODE_INVAL)
2406

    
2407
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2408
    if master != myself:
2409
      raise errors.OpPrereqError("This is not the master node, please connect"
2410
                                 " to node '%s' and rerun the command" %
2411
                                 master, errors.ECODE_INVAL)
2412
    raise
2413
  return client
2414

    
2415

    
2416
def FormatError(err):
2417
  """Return a formatted error message for a given error.
2418

2419
  This function takes an exception instance and returns a tuple
2420
  consisting of two values: first, the recommended exit code, and
2421
  second, a string describing the error message (not
2422
  newline-terminated).
2423

2424
  """
2425
  retcode = 1
2426
  obuf = StringIO()
2427
  msg = str(err)
2428
  if isinstance(err, errors.ConfigurationError):
2429
    txt = "Corrupt configuration file: %s" % msg
2430
    logging.error(txt)
2431
    obuf.write(txt + "\n")
2432
    obuf.write("Aborting.")
2433
    retcode = 2
2434
  elif isinstance(err, errors.HooksAbort):
2435
    obuf.write("Failure: hooks execution failed:\n")
2436
    for node, script, out in err.args[0]:
2437
      if out:
2438
        obuf.write("  node: %s, script: %s, output: %s\n" %
2439
                   (node, script, out))
2440
      else:
2441
        obuf.write("  node: %s, script: %s (no output)\n" %
2442
                   (node, script))
2443
  elif isinstance(err, errors.HooksFailure):
2444
    obuf.write("Failure: hooks general failure: %s" % msg)
2445
  elif isinstance(err, errors.ResolverError):
2446
    this_host = netutils.Hostname.GetSysName()
2447
    if err.args[0] == this_host:
2448
      msg = "Failure: can't resolve my own hostname ('%s')"
2449
    else:
2450
      msg = "Failure: can't resolve hostname '%s'"
2451
    obuf.write(msg % err.args[0])
2452
  elif isinstance(err, errors.OpPrereqError):
2453
    if len(err.args) == 2:
2454
      obuf.write("Failure: prerequisites not met for this"
2455
                 " operation:\nerror type: %s, error details:\n%s" %
2456
                 (err.args[1], err.args[0]))
2457
    else:
2458
      obuf.write("Failure: prerequisites not met for this"
2459
                 " operation:\n%s" % msg)
2460
  elif isinstance(err, errors.OpExecError):
2461
    obuf.write("Failure: command execution error:\n%s" % msg)
2462
  elif isinstance(err, errors.TagError):
2463
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2464
  elif isinstance(err, errors.JobQueueDrainError):
2465
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2466
               " accept new requests\n")
2467
  elif isinstance(err, errors.JobQueueFull):
2468
    obuf.write("Failure: the job queue is full and doesn't accept new"
2469
               " job submissions until old jobs are archived\n")
2470
  elif isinstance(err, errors.TypeEnforcementError):
2471
    obuf.write("Parameter Error: %s" % msg)
2472
  elif isinstance(err, errors.ParameterError):
2473
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2474
  elif isinstance(err, luxi.NoMasterError):
2475
    if err.args[0] == pathutils.MASTER_SOCKET:
2476
      daemon = "the master daemon"
2477
    elif err.args[0] == pathutils.QUERY_SOCKET:
2478
      daemon = "the config daemon"
2479
    else:
2480
      daemon = "socket '%s'" % str(err.args[0])
2481
    obuf.write("Cannot communicate with %s.\nIs the process running"
2482
               " and listening for connections?" % daemon)
2483
  elif isinstance(err, luxi.TimeoutError):
2484
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2485
               " been submitted and will continue to run even if the call"
2486
               " timed out. Useful commands in this situation are \"gnt-job"
2487
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2488
    obuf.write(msg)
2489
  elif isinstance(err, luxi.PermissionError):
2490
    obuf.write("It seems you don't have permissions to connect to the"
2491
               " master daemon.\nPlease retry as a different user.")
2492
  elif isinstance(err, luxi.ProtocolError):
2493
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2494
               "%s" % msg)
2495
  elif isinstance(err, errors.JobLost):
2496
    obuf.write("Error checking job status: %s" % msg)
2497
  elif isinstance(err, errors.QueryFilterParseError):
2498
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2499
    obuf.write("\n".join(err.GetDetails()))
2500
  elif isinstance(err, errors.GenericError):
2501
    obuf.write("Unhandled Ganeti error: %s" % msg)
2502
  elif isinstance(err, JobSubmittedException):
2503
    obuf.write("JobID: %s\n" % err.args[0])
2504
    retcode = 0
2505
  else:
2506
    obuf.write("Unhandled exception: %s" % msg)
2507
  return retcode, obuf.getvalue().rstrip("\n")
2508

    
2509

    
2510
def GenericMain(commands, override=None, aliases=None,
2511
                env_override=frozenset()):
2512
  """Generic main function for all the gnt-* commands.
2513

2514
  @param commands: a dictionary with a special structure, see the design doc
2515
                   for command line handling.
2516
  @param override: if not None, we expect a dictionary with keys that will
2517
                   override command line options; this can be used to pass
2518
                   options from the scripts to generic functions
2519
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2520
  @param env_override: list of environment names which are allowed to submit
2521
                       default args for commands
2522

2523
  """
2524
  # save the program name and the entire command line for later logging
2525
  if sys.argv:
2526
    binary = os.path.basename(sys.argv[0])
2527
    if not binary:
2528
      binary = sys.argv[0]
2529

    
2530
    if len(sys.argv) >= 2:
2531
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2532
    else:
2533
      logname = binary
2534

    
2535
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2536
  else:
2537
    binary = "<unknown program>"
2538
    cmdline = "<unknown>"
2539

    
2540
  if aliases is None:
2541
    aliases = {}
2542

    
2543
  try:
2544
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2545
                                       env_override)
2546
  except _ShowVersion:
2547
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2548
             constants.RELEASE_VERSION)
2549
    return constants.EXIT_SUCCESS
2550
  except _ShowUsage, err:
2551
    for line in _FormatUsage(binary, commands):
2552
      ToStdout(line)
2553

    
2554
    if err.exit_error:
2555
      return constants.EXIT_FAILURE
2556
    else:
2557
      return constants.EXIT_SUCCESS
2558
  except errors.ParameterError, err:
2559
    result, err_msg = FormatError(err)
2560
    ToStderr(err_msg)
2561
    return 1
2562

    
2563
  if func is None: # parse error
2564
    return 1
2565

    
2566
  if override is not None:
2567
    for key, val in override.iteritems():
2568
      setattr(options, key, val)
2569

    
2570
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2571
                     stderr_logging=True)
2572

    
2573
  logging.info("Command line: %s", cmdline)
2574

    
2575
  try:
2576
    result = func(options, args)
2577
  except (errors.GenericError, luxi.ProtocolError,
2578
          JobSubmittedException), err:
2579
    result, err_msg = FormatError(err)
2580
    logging.exception("Error during command processing")
2581
    ToStderr(err_msg)
2582
  except KeyboardInterrupt:
2583
    result = constants.EXIT_FAILURE
2584
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2585
             " might have been submitted and"
2586
             " will continue to run in the background.")
2587
  except IOError, err:
2588
    if err.errno == errno.EPIPE:
2589
      # our terminal went away, we'll exit
2590
      sys.exit(constants.EXIT_FAILURE)
2591
    else:
2592
      raise
2593

    
2594
  return result
2595

    
2596

    
2597
def ParseNicOption(optvalue):
2598
  """Parses the value of the --net option(s).
2599

2600
  """
2601
  try:
2602
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2603
  except (TypeError, ValueError), err:
2604
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2605
                               errors.ECODE_INVAL)
2606

    
2607
  nics = [{}] * nic_max
2608
  for nidx, ndict in optvalue:
2609
    nidx = int(nidx)
2610

    
2611
    if not isinstance(ndict, dict):
2612
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2613
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2614

    
2615
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2616

    
2617
    nics[nidx] = ndict
2618

    
2619
  return nics
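
# Example (illustrative, values made up), assuming the (index, dict) pairs
# the "--net" option normally produces:
#   ParseNicOption([("0", {"ip": "pool"}), ("1", {"link": "br0"})])
#     => [{"ip": "pool"}, {"link": "br0"}]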
2620

    
2621

    
2622
def FixHvParams(hvparams):
2623
  # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
2624
  # comma to space because commas cannot be accepted on the command line
2625
  # (they already act as the separator between different hvparams). Still,
2626
  # RAPI should be able to accept commas for backwards compatibility.
2627
  # Therefore, we convert spaces into commas here, and we keep the old
2628
  # parsing logic everywhere else.
2629
  try:
2630
    new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2631
    hvparams[constants.HV_USB_DEVICES] = new_usb_devices
2632
  except KeyError:
2633
    # No usb_devices, no modification required
2634
    pass
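
# Example (illustrative, device IDs made up):
#   hvparams = {constants.HV_USB_DEVICES: "058f:6387 046d:c31c"}
#   FixHvParams(hvparams)
#   # hvparams[constants.HV_USB_DEVICES] is now "058f:6387,046d:c31c"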
2635

    
2636

    
2637
def GenericInstanceCreate(mode, opts, args):
2638
  """Add an instance to the cluster via either creation or import.
2639

2640
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2641
  @param opts: the command line options selected by the user
2642
  @type args: list
2643
  @param args: should contain only one element, the new instance name
2644
  @rtype: int
2645
  @return: the desired exit code
2646

2647
  """
2648
  instance = args[0]
2649

    
2650
  (pnode, snode) = SplitNodeOption(opts.node)
2651

    
2652
  hypervisor = None
2653
  hvparams = {}
2654
  if opts.hypervisor:
2655
    hypervisor, hvparams = opts.hypervisor
2656

    
2657
  if opts.nics:
2658
    nics = ParseNicOption(opts.nics)
2659
  elif opts.no_nics:
2660
    # no nics
2661
    nics = []
2662
  elif mode == constants.INSTANCE_CREATE:
2663
    # default of one nic, all auto
2664
    nics = [{}]
2665
  else:
2666
    # mode == import
2667
    nics = []
2668

    
2669
  if opts.disk_template == constants.DT_DISKLESS:
2670
    if opts.disks or opts.sd_size is not None:
2671
      raise errors.OpPrereqError("Diskless instance but disk"
2672
                                 " information passed", errors.ECODE_INVAL)
2673
    disks = []
2674
  else:
2675
    if (not opts.disks and not opts.sd_size
2676
        and mode == constants.INSTANCE_CREATE):
2677
      raise errors.OpPrereqError("No disk information specified",
2678
                                 errors.ECODE_INVAL)
2679
    if opts.disks and opts.sd_size is not None:
2680
      raise errors.OpPrereqError("Please use either the '--disk' or"
2681
                                 " '-s' option", errors.ECODE_INVAL)
2682
    if opts.sd_size is not None:
2683
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2684

    
2685
    if opts.disks:
2686
      try:
2687
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2688
      except ValueError, err:
2689
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2690
                                   errors.ECODE_INVAL)
2691
      disks = [{}] * disk_max
2692
    else:
2693
      disks = []
2694
    for didx, ddict in opts.disks:
2695
      didx = int(didx)
2696
      if not isinstance(ddict, dict):
2697
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2698
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2699
      elif constants.IDISK_SIZE in ddict:
2700
        if constants.IDISK_ADOPT in ddict:
2701
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2702
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2703
        try:
2704
          ddict[constants.IDISK_SIZE] = \
2705
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2706
        except ValueError, err:
2707
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2708
                                     (didx, err), errors.ECODE_INVAL)
2709
      elif constants.IDISK_ADOPT in ddict:
2710
        if constants.IDISK_SPINDLES in ddict:
2711
          raise errors.OpPrereqError("spindles is not a valid option when"
2712
                                     " adopting a disk", errors.ECODE_INVAL)
2713
        if mode == constants.INSTANCE_IMPORT:
2714
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2715
                                     " import", errors.ECODE_INVAL)
2716
        ddict[constants.IDISK_SIZE] = 0
2717
      else:
2718
        raise errors.OpPrereqError("Missing size or adoption source for"
2719
                                   " disk %d" % didx, errors.ECODE_INVAL)
2720
      disks[didx] = ddict
2721

    
2722
  if opts.tags is not None:
2723
    tags = opts.tags.split(",")
2724
  else:
2725
    tags = []
2726

    
2727
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2728
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2729
  FixHvParams(hvparams)
2730

    
2731
  if mode == constants.INSTANCE_CREATE:
2732
    start = opts.start
2733
    os_type = opts.os
2734
    force_variant = opts.force_variant
2735
    src_node = None
2736
    src_path = None
2737
    no_install = opts.no_install
2738
    identify_defaults = False
2739
  elif mode == constants.INSTANCE_IMPORT:
2740
    start = False
2741
    os_type = None
2742
    force_variant = False
2743
    src_node = opts.src_node
2744
    src_path = opts.src_dir
2745
    no_install = None
2746
    identify_defaults = opts.identify_defaults
2747
  else:
2748
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2749

    
2750
  op = opcodes.OpInstanceCreate(instance_name=instance,
2751
                                disks=disks,
2752
                                disk_template=opts.disk_template,
2753
                                nics=nics,
2754
                                conflicts_check=opts.conflicts_check,
2755
                                pnode=pnode, snode=snode,
2756
                                ip_check=opts.ip_check,
2757
                                name_check=opts.name_check,
2758
                                wait_for_sync=opts.wait_for_sync,
2759
                                file_storage_dir=opts.file_storage_dir,
2760
                                file_driver=opts.file_driver,
2761
                                iallocator=opts.iallocator,
2762
                                hypervisor=hypervisor,
2763
                                hvparams=hvparams,
2764
                                beparams=opts.beparams,
2765
                                osparams=opts.osparams,
2766
                                mode=mode,
2767
                                start=start,
2768
                                os_type=os_type,
2769
                                force_variant=force_variant,
2770
                                src_node=src_node,
2771
                                src_path=src_path,
2772
                                tags=tags,
2773
                                no_install=no_install,
2774
                                identify_defaults=identify_defaults,
2775
                                ignore_ipolicy=opts.ignore_ipolicy)
2776

    
2777
  SubmitOrSend(op, opts)
2778
  return 0
2779

    
2780

    
2781
class _RunWhileClusterStoppedHelper:
2782
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2783

2784
  """
2785
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2786
    """Initializes this class.
2787

2788
    @type feedback_fn: callable
2789
    @param feedback_fn: Feedback function
2790
    @type cluster_name: string
2791
    @param cluster_name: Cluster name
2792
    @type master_node: string
2793
    @param master_node: Master node name
2794
    @type online_nodes: list
2795
    @param online_nodes: List of names of online nodes
2796

2797
    """
2798
    self.feedback_fn = feedback_fn
2799
    self.cluster_name = cluster_name
2800
    self.master_node = master_node
2801
    self.online_nodes = online_nodes
2802

    
2803
    self.ssh = ssh.SshRunner(self.cluster_name)
2804

    
2805
    self.nonmaster_nodes = [name for name in online_nodes
2806
                            if name != master_node]
2807

    
2808
    assert self.master_node not in self.nonmaster_nodes
2809

    
2810
  def _RunCmd(self, node_name, cmd):
2811
    """Runs a command on the local or a remote machine.
2812

2813
    @type node_name: string
2814
    @param node_name: Machine name
2815
    @type cmd: list
2816
    @param cmd: Command
2817

2818
    """
2819
    if node_name is None or node_name == self.master_node:
2820
      # No need to use SSH
2821
      result = utils.RunCmd(cmd)
2822
    else:
2823
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2824
                            utils.ShellQuoteArgs(cmd))
2825

    
2826
    if result.failed:
2827
      errmsg = ["Failed to run command %s" % result.cmd]
2828
      if node_name:
2829
        errmsg.append("on node %s" % node_name)
2830
      errmsg.append(": exitcode %s and error %s" %
2831
                    (result.exit_code, result.output))
2832
      raise errors.OpExecError(" ".join(errmsg))
2833

    
2834
  def Call(self, fn, *args):
2835
    """Call function while all daemons are stopped.
2836

2837
    @type fn: callable
2838
    @param fn: Function to be called
2839

2840
    """
2841
    # Pause watcher by acquiring an exclusive lock on watcher state file
2842
    self.feedback_fn("Blocking watcher")
2843
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2844
    try:
2845
      # TODO: Currently, this just blocks. There's no timeout.
2846
      # TODO: Should it be a shared lock?
2847
      watcher_block.Exclusive(blocking=True)
2848

    
2849
      # Stop master daemons, so that no new jobs can come in and all running
2850
      # ones are finished
2851
      self.feedback_fn("Stopping master daemons")
2852
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2853
      try:
2854
        # Stop daemons on all nodes
2855
        for node_name in self.online_nodes:
2856
          self.feedback_fn("Stopping daemons on %s" % node_name)
2857
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2858

    
2859
        # All daemons are shut down now
2860
        try:
2861
          return fn(self, *args)
2862
        except Exception, err:
2863
          _, errmsg = FormatError(err)
2864
          logging.exception("Caught exception")
2865
          self.feedback_fn(errmsg)
2866
          raise
2867
      finally:
2868
        # Start cluster again, master node last
2869
        for node_name in self.nonmaster_nodes + [self.master_node]:
2870
          self.feedback_fn("Starting daemons on %s" % node_name)
2871
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2872
    finally:
2873
      # Resume watcher
2874
      watcher_block.Close()
2875

    
2876

    
2877
def RunWhileClusterStopped(feedback_fn, fn, *args):
2878
  """Calls a function while all cluster daemons are stopped.
2879

2880
  @type feedback_fn: callable
2881
  @param feedback_fn: Feedback function
2882
  @type fn: callable
2883
  @param fn: Function to be called when daemons are stopped
2884

2885
  """
2886
  feedback_fn("Gathering cluster information")
2887

    
2888
  # This ensures we're running on the master daemon
2889
  cl = GetClient()
2890

    
2891
  (cluster_name, master_node) = \
2892
    cl.QueryConfigValues(["cluster_name", "master_node"])
2893

    
2894
  online_nodes = GetOnlineNodes([], cl=cl)
2895

    
2896
  # Don't keep a reference to the client. The master daemon will go away.
2897
  del cl
2898

    
2899
  assert master_node in online_nodes
2900

    
2901
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2902
                                       online_nodes).Call(fn, *args)
2903

    
2904
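# Illustrative usage sketch for RunWhileClusterStopped; _OfflineTask is a
# hypothetical callback, not existing code. The helper's Call method passes
# itself as the first argument to the callback:
#
#   def _OfflineTask(helper, new_name):
#     # runs with the watcher blocked and all cluster daemons stopped
#     helper.feedback_fn("Doing offline maintenance for %s" % new_name)
#     return 0
#
#   RunWhileClusterStopped(ToStdout, _OfflineTask, "cluster.example.com")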

    
2905
def GenerateTable(headers, fields, separator, data,
2906
                  numfields=None, unitfields=None,
2907
                  units=None):
2908
  """Prints a table with headers and different fields.
2909

2910
  @type headers: dict
2911
  @param headers: dictionary mapping field names to headers for
2912
      the table
2913
  @type fields: list
2914
  @param fields: the field names corresponding to each row in
2915
      the data field
2916
  @type separator: string or None
  @param separator: the separator to be used; if this is None,
2917
      the default 'smart' algorithm is used which computes optimal
2918
      field width, otherwise just the separator is used between
2919
      each field
2920
  @type data: list
2921
  @param data: a list of lists, each sublist being one row to be output
2922
  @type numfields: list
2923
  @param numfields: a list with the fields that hold numeric
2924
      values and thus should be right-aligned
2925
  @type unitfields: list
2926
  @param unitfields: a list with the fields that hold numeric
2927
      values that should be formatted with the units field
2928
  @type units: string or None
2929
  @param units: the units we should use for formatting, or None for
2930
      automatic choice (human-readable for non-separator usage, otherwise
2931
      megabytes); this is a one-letter string
2932

2933
  """
2934
  if units is None:
2935
    if separator:
2936
      units = "m"
2937
    else:
2938
      units = "h"
2939

    
2940
  if numfields is None:
2941
    numfields = []
2942
  if unitfields is None:
2943
    unitfields = []
2944

    
2945
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2946
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2947

    
2948
  format_fields = []
2949
  for field in fields:
2950
    if headers and field not in headers:
2951
      # TODO: handle better unknown fields (either revert to old
2952
      # style of raising exception, or deal more intelligently with
2953
      # variable fields)
2954
      headers[field] = field
2955
    if separator is not None:
2956
      format_fields.append("%s")
2957
    elif numfields.Matches(field):
2958
      format_fields.append("%*s")
2959
    else:
2960
      format_fields.append("%-*s")
2961

    
2962
  if separator is None:
2963
    mlens = [0 for name in fields]
2964
    format_str = " ".join(format_fields)
2965
  else:
2966
    format_str = separator.replace("%", "%%").join(format_fields)
2967

    
2968
  for row in data:
2969
    if row is None:
2970
      continue
2971
    for idx, val in enumerate(row):
2972
      if unitfields.Matches(fields[idx]):
2973
        try:
2974
          val = int(val)
2975
        except (TypeError, ValueError):
2976
          pass
2977
        else:
2978
          val = row[idx] = utils.FormatUnit(val, units)
2979
      val = row[idx] = str(val)
2980
      if separator is None:
2981
        mlens[idx] = max(mlens[idx], len(val))
2982

    
2983
  result = []
2984
  if headers:
2985
    args = []
2986
    for idx, name in enumerate(fields):
2987
      hdr = headers[name]
2988
      if separator is None:
2989
        mlens[idx] = max(mlens[idx], len(hdr))
2990
        args.append(mlens[idx])
2991
      args.append(hdr)
2992
    result.append(format_str % tuple(args))
2993

    
2994
  if separator is None:
2995
    assert len(mlens) == len(fields)
2996

    
2997
    if fields and not numfields.Matches(fields[-1]):
2998
      mlens[-1] = 0
2999

    
3000
  for line in data:
3001
    args = []
3002
    if line is None:
3003
      line = ["-" for _ in fields]
3004
    for idx in range(len(fields)):
3005
      if separator is None:
3006
        args.append(mlens[idx])
3007
      args.append(line[idx])
3008
    result.append(format_str % tuple(args))
3009

    
3010
  return result
3011

    
3012
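# Illustrative example for GenerateTable (hypothetical data); with
# separator=None the "smart" width computation is used, and "mem" is
# right-aligned and rendered through utils.FormatUnit:
#
#   rows = [["node1.example.com", 1024], ["node2.example.com", 2048]]
#   lines = GenerateTable({"name": "Node", "mem": "Memory"},
#                         ["name", "mem"], None, rows,
#                         numfields=["mem"], unitfields=["mem"])
#   for line in lines:
#     ToStdout(line)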

    
3013
def _FormatBool(value):
3014
  """Formats a boolean value as a string.
3015

3016
  """
3017
  if value:
3018
    return "Y"
3019
  return "N"
3020

    
3021

    
3022
#: Default formatting for query results; (callback, align right)
3023
_DEFAULT_FORMAT_QUERY = {
3024
  constants.QFT_TEXT: (str, False),
3025
  constants.QFT_BOOL: (_FormatBool, False),
3026
  constants.QFT_NUMBER: (str, True),
3027
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3028
  constants.QFT_OTHER: (str, False),
3029
  constants.QFT_UNKNOWN: (str, False),
3030
  }
3031

    
3032

    
3033
def _GetColumnFormatter(fdef, override, unit):
3034
  """Returns formatting function for a field.
3035

3036
  @type fdef: L{objects.QueryFieldDefinition}
3037
  @type override: dict
3038
  @param override: Dictionary for overriding field formatting functions,
3039
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3040
  @type unit: string
3041
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3042
  @rtype: tuple; (callable, bool)
3043
  @return: Returns the function to format a value (takes one parameter) and a
3044
    boolean for aligning the value on the right-hand side
3045

3046
  """
3047
  fmt = override.get(fdef.name, None)
3048
  if fmt is not None:
3049
    return fmt
3050

    
3051
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3052

    
3053
  if fdef.kind == constants.QFT_UNIT:
3054
    # Can't keep this information in the static dictionary
3055
    return (lambda value: utils.FormatUnit(value, unit), True)
3056

    
3057
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3058
  if fmt is not None:
3059
    return fmt
3060

    
3061
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3062

    
3063

    
3064
class _QueryColumnFormatter:
3065
  """Callable class for formatting fields of a query.
3066

3067
  """
3068
  def __init__(self, fn, status_fn, verbose):
3069
    """Initializes this class.
3070

3071
    @type fn: callable
3072
    @param fn: Formatting function
3073
    @type status_fn: callable
3074
    @param status_fn: Function to report fields' status
3075
    @type verbose: boolean
3076
    @param verbose: whether to use verbose field descriptions or not
3077

3078
    """
3079
    self._fn = fn
3080
    self._status_fn = status_fn
3081
    self._verbose = verbose
3082

    
3083
  def __call__(self, data):
3084
    """Returns a field's string representation.
3085

3086
    """
3087
    (status, value) = data
3088

    
3089
    # Report status
3090
    self._status_fn(status)
3091

    
3092
    if status == constants.RS_NORMAL:
3093
      return self._fn(value)
3094

    
3095
    assert value is None, \
3096
           "Found value %r for abnormal status %s" % (value, status)
3097

    
3098
    return FormatResultError(status, self._verbose)
3099

    
3100

    
3101
def FormatResultError(status, verbose):
3102
  """Formats result status other than L{constants.RS_NORMAL}.
3103

3104
  @param status: The result status
3105
  @type verbose: boolean
3106
  @param verbose: Whether to return the verbose text
3107
  @return: Text of result status
3108

3109
  """
3110
  assert status != constants.RS_NORMAL, \
3111
         "FormatResultError called with status equal to constants.RS_NORMAL"
3112
  try:
3113
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3114
  except KeyError:
3115
    raise NotImplementedError("Unknown status %s" % status)
3116
  else:
3117
    if verbose:
3118
      return verbose_text
3119
    return normal_text
3120

    
3121

    
3122
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3123
                      header=False, verbose=False):
3124
  """Formats data in L{objects.QueryResponse}.
3125

3126
  @type result: L{objects.QueryResponse}
3127
  @param result: result of query operation
3128
  @type unit: string
3129
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3130
    see L{utils.text.FormatUnit}
3131
  @type format_override: dict
3132
  @param format_override: Dictionary for overriding field formatting functions,
3133
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3134
  @type separator: string or None
3135
  @param separator: String used to separate fields
3136
  @type header: bool
3137
  @param header: Whether to output header row
3138
  @type verbose: boolean
3139
  @param verbose: whether to use verbose field descriptions or not
3140

3141
  """
3142
  if unit is None:
3143
    if separator:
3144
      unit = "m"
3145
    else:
3146
      unit = "h"
3147

    
3148
  if format_override is None:
3149
    format_override = {}
3150

    
3151
  stats = dict.fromkeys(constants.RS_ALL, 0)
3152

    
3153
  def _RecordStatus(status):
3154
    if status in stats:
3155
      stats[status] += 1
3156

    
3157
  columns = []
3158
  for fdef in result.fields:
3159
    assert fdef.title and fdef.name
3160
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3161
    columns.append(TableColumn(fdef.title,
3162
                               _QueryColumnFormatter(fn, _RecordStatus,
3163
                                                     verbose),
3164
                               align_right))
3165

    
3166
  table = FormatTable(result.data, columns, header, separator)
3167

    
3168
  # Collect statistics
3169
  assert len(stats) == len(constants.RS_ALL)
3170
  assert compat.all(count >= 0 for count in stats.values())
3171

    
3172
  # Determine overall status. If there was no data, unknown fields must be
3173
  # detected via the field definitions.
3174
  if (stats[constants.RS_UNKNOWN] or
3175
      (not result.data and _GetUnknownFields(result.fields))):
3176
    status = QR_UNKNOWN
3177
  elif compat.any(count > 0 for key, count in stats.items()
3178
                  if key != constants.RS_NORMAL):
3179
    status = QR_INCOMPLETE
3180
  else:
3181
    status = QR_NORMAL
3182

    
3183
  return (status, table)
3184

    
3185
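# Illustrative sketch of a format_override dictionary for FormatQueryResult
# (the field names are assumptions); entries follow the same
# (callback, align_right) convention as _DEFAULT_FORMAT_QUERY, and "response"
# stands for an objects.QueryResponse obtained from a query call:
#
#   fmtoverride = {
#     "ctime": (utils.FormatTime, False),
#     "pinst_cnt": (str, True),
#     }
#   (status, lines) = FormatQueryResult(response, unit="h", header=True,
#                                       format_override=fmtoverride)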

    
3186
def _GetUnknownFields(fdefs):
3187
  """Returns list of unknown fields included in C{fdefs}.
3188

3189
  @type fdefs: list of L{objects.QueryFieldDefinition}
3190

3191
  """
3192
  return [fdef for fdef in fdefs
3193
          if fdef.kind == constants.QFT_UNKNOWN]
3194

    
3195

    
3196
def _WarnUnknownFields(fdefs):
3197
  """Prints a warning to stderr if a query included unknown fields.
3198

3199
  @type fdefs: list of L{objects.QueryFieldDefinition}
3200

3201
  """
3202
  unknown = _GetUnknownFields(fdefs)
3203
  if unknown:
3204
    ToStderr("Warning: Queried for unknown fields %s",
3205
             utils.CommaJoin(fdef.name for fdef in unknown))
3206
    return True
3207

    
3208
  return False
3209

    
3210

    
3211
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3212
                format_override=None, verbose=False, force_filter=False,
3213
                namefield=None, qfilter=None, isnumeric=False):
3214
  """Generic implementation for listing all items of a resource.
3215

3216
  @param resource: One of L{constants.QR_VIA_LUXI}
3217
  @type fields: list of strings
3218
  @param fields: List of fields to query for
3219
  @type names: list of strings
3220
  @param names: Names of items to query for
3221
  @type unit: string or None
3222
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3223
    None for automatic choice (human-readable for non-separator usage,
3224
    otherwise megabytes); this is a one-letter string
3225
  @type separator: string or None
3226
  @param separator: String used to separate fields
3227
  @type header: bool
3228
  @param header: Whether to show header row
3229
  @type force_filter: bool
3230
  @param force_filter: Whether to always treat names as filter
3231
  @type format_override: dict
3232
  @param format_override: Dictionary for overriding field formatting functions,
3233
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3234
  @type verbose: boolean
3235
  @param verbose: whether to use verbose field descriptions or not
3236
  @type namefield: string
3237
  @param namefield: Name of field to use for simple filters (see
3238
    L{qlang.MakeFilter} for details)
3239
  @type qfilter: list or None
3240
  @param qfilter: Query filter (in addition to names)
3241
  @type isnumeric: bool
3242
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3243
    any simple filters built by namefield should use integer values to
3244
    reflect that
3245

3246
  """
3247
  if not names:
3248
    names = None
3249

    
3250
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3251
                                isnumeric=isnumeric)
3252

    
3253
  if qfilter is None:
3254
    qfilter = namefilter
3255
  elif namefilter is not None:
3256
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3257

    
3258
  if cl is None:
3259
    cl = GetClient()
3260

    
3261
  response = cl.Query(resource, fields, qfilter)
3262

    
3263
  found_unknown = _WarnUnknownFields(response.fields)
3264

    
3265
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3266
                                     header=header,
3267
                                     format_override=format_override,
3268
                                     verbose=verbose)
3269

    
3270
  for line in data:
3271
    ToStdout(line)
3272

    
3273
  assert ((found_unknown and status == QR_UNKNOWN) or
3274
          (not found_unknown and status != QR_UNKNOWN))
3275

    
3276
  if status == QR_UNKNOWN:
3277
    return constants.EXIT_UNKNOWN_FIELD
3278

    
3279
  # TODO: Should the list command fail if not all data could be collected?
3280
  return constants.EXIT_SUCCESS
3281

    
3282
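# Illustrative sketch of a typical GenericList caller; the option attributes
# and field names are assumptions loosely modelled on the gnt-* list commands:
#
#   def ListNodes(opts, args):
#     fmtoverride = {"tags": (",".join, False)}
#     return GenericList(constants.QR_NODE, ["name", "dtotal", "tags"], args,
#                        opts.units, opts.separator, not opts.no_headers,
#                        format_override=fmtoverride, verbose=opts.verbose,
#                        namefield="name")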

    
3283
def _FieldDescValues(fdef):
3284
  """Helper function for L{GenericListFields} to get query field description.
3285

3286
  @type fdef: L{objects.QueryFieldDefinition}
3287
  @rtype: list
3288

3289
  """
3290
  return [
3291
    fdef.name,
3292
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3293
    fdef.title,
3294
    fdef.doc,
3295
    ]
3296

    
3297

    
3298
def GenericListFields(resource, fields, separator, header, cl=None):
3299
  """Generic implementation for listing fields for a resource.
3300

3301
  @param resource: One of L{constants.QR_VIA_LUXI}
3302
  @type fields: list of strings
3303
  @param fields: List of fields to query for
3304
  @type separator: string or None
3305
  @param separator: String used to separate fields
3306
  @type header: bool
3307
  @param header: Whether to show header row
3308

3309
  """
3310
  if cl is None:
3311
    cl = GetClient()
3312

    
3313
  if not fields:
3314
    fields = None
3315

    
3316
  response = cl.QueryFields(resource, fields)
3317

    
3318
  found_unknown = _WarnUnknownFields(response.fields)
3319

    
3320
  columns = [
3321
    TableColumn("Name", str, False),
3322
    TableColumn("Type", str, False),
3323
    TableColumn("Title", str, False),
3324
    TableColumn("Description", str, False),
3325
    ]
3326

    
3327
  rows = map(_FieldDescValues, response.fields)
3328

    
3329
  for line in FormatTable(rows, columns, header, separator):
3330
    ToStdout(line)
3331

    
3332
  if found_unknown:
3333
    return constants.EXIT_UNKNOWN_FIELD
3334

    
3335
  return constants.EXIT_SUCCESS
3336

    
3337
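# Illustrative sketch of a GenericListFields caller (the option attributes are
# assumptions); "args" holds the field names given on the command line:
#
#   def ListNodeFields(opts, args):
#     return GenericListFields(constants.QR_NODE, args, opts.separator,
#                              not opts.no_headers)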

    
3338
class TableColumn:
3339
  """Describes a column for L{FormatTable}.
3340

3341
  """
3342
  def __init__(self, title, fn, align_right):
3343
    """Initializes this class.
3344

3345
    @type title: string
3346
    @param title: Column title
3347
    @type fn: callable
3348
    @param fn: Formatting function
3349
    @type align_right: bool
3350
    @param align_right: Whether to align values on the right-hand side
3351

3352
    """
3353
    self.title = title
3354
    self.format = fn
3355
    self.align_right = align_right
3356

    
3357

    
3358
def _GetColFormatString(width, align_right):
3359
  """Returns the format string for a field.
3360

3361
  """
3362
  if align_right:
3363
    sign = ""
3364
  else:
3365
    sign = "-"
3366

    
3367
  return "%%%s%ss" % (sign, width)
3368

    
3369

    
3370
def FormatTable(rows, columns, header, separator):
3371
  """Formats data as a table.
3372

3373
  @type rows: list of lists
3374
  @param rows: Row data, one list per row
3375
  @type columns: list of L{TableColumn}
3376
  @param columns: Column descriptions
3377
  @type header: bool
3378
  @param header: Whether to show header row
3379
  @type separator: string or None
3380
  @param separator: String used to separate columns
3381

3382
  """
3383
  if header:
3384
    data = [[col.title for col in columns]]
3385
    colwidth = [len(col.title) for col in columns]
3386
  else:
3387
    data = []
3388
    colwidth = [0 for _ in columns]
3389

    
3390
  # Format row data
3391
  for row in rows:
3392
    assert len(row) == len(columns)
3393

    
3394
    formatted = [col.format(value) for value, col in zip(row, columns)]
3395

    
3396
    if separator is None:
3397
      # Update column widths
3398
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3399
        # Modifying a list's items while iterating is fine
3400
        colwidth[idx] = max(oldwidth, len(value))
3401

    
3402
    data.append(formatted)
3403

    
3404
  if separator is not None:
3405
    # Return early if a separator is used
3406
    return [separator.join(row) for row in data]
3407

    
3408
  if columns and not columns[-1].align_right:
3409
    # Avoid unnecessary spaces at end of line
3410
    colwidth[-1] = 0
3411

    
3412
  # Build format string
3413
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3414
                  for col, width in zip(columns, colwidth)])
3415

    
3416
  return [fmt % tuple(row) for row in data]
3417

    
3418
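# Illustrative example for FormatTable and TableColumn (hypothetical data):
#
#   columns = [TableColumn("Name", str, False),
#              TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True)]
#   for line in FormatTable([["disk/0", 512], ["disk/1", 10240]],
#                           columns, True, None):
#     ToStdout(line)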

    
3419
def FormatTimestamp(ts):
3420
  """Formats a given timestamp.
3421

3422
  @type ts: timestamp
3423
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3424

3425
  @rtype: string
3426
  @return: a string with the formatted timestamp
3427

3428
  """
3429
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3430
    return "?"
3431

    
3432
  (sec, usecs) = ts
3433
  return utils.FormatTime(sec, usecs=usecs)
3434

    
3435
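# Illustrative examples for FormatTimestamp; the exact rendering comes from
# utils.FormatTime and depends on the local timezone:
#
#   FormatTimestamp((1356093760, 123456))  # e.g. "2012-12-21 13:22:40.123456"
#   FormatTimestamp("not-a-timestamp")     # -> "?"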

    
3436
def ParseTimespec(value):
3437
  """Parse a time specification.
3438

3439
  The following suffixes will be recognized:
3440

3441
    - s: seconds
3442
    - m: minutes
3443
    - h: hours
3444
    - d: days
3445
    - w: weeks
3446

3447
  Without any suffix, the value will be taken to be in seconds.
3448

3449
  """
3450
  value = str(value)
3451
  if not value:
3452
    raise errors.OpPrereqError("Empty time specification passed",
3453
                               errors.ECODE_INVAL)
3454
  suffix_map = {
3455
    "s": 1,
3456
    "m": 60,
3457
    "h": 3600,
3458
    "d": 86400,
3459
    "w": 604800,
3460
    }
3461
  if value[-1] not in suffix_map:
3462
    try:
3463
      value = int(value)
3464
    except (TypeError, ValueError):
3465
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3466
                                 errors.ECODE_INVAL)
3467
  else:
3468
    multiplier = suffix_map[value[-1]]
3469
    value = value[:-1]
3470
    if not value: # no data left after stripping the suffix
3471
      raise errors.OpPrereqError("Invalid time specification (only"
3472
                                 " suffix passed)", errors.ECODE_INVAL)
3473
    try:
3474
      value = int(value) * multiplier
3475
    except (TypeError, ValueError):
3476
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3477
                                 errors.ECODE_INVAL)
3478
  return value
3479

    
3480
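# Illustrative examples for ParseTimespec:
#
#   ParseTimespec("30")   # -> 30 (no suffix, taken as seconds)
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("h")    # raises OpPrereqError (suffix without a value)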

    
3481
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3482
                   filter_master=False, nodegroup=None):
3483
  """Returns the names of online nodes.
3484

3485
  This function will also log a warning on stderr with the names of
3486
  the offline nodes that are skipped.
3487

3488
  @param nodes: if not empty, use only this subset of nodes (minus the
3489
      offline ones)
3490
  @param cl: if not None, luxi client to use
3491
  @type nowarn: boolean
3492
  @param nowarn: by default, this function will output a note with the
3493
      offline nodes that are skipped; if this parameter is True the
3494
      note is not displayed
3495
  @type secondary_ips: boolean
3496
  @param secondary_ips: if True, return the secondary IPs instead of the
3497
      names, useful for doing network traffic over the replication interface
3498
      (if any)
3499
  @type filter_master: boolean
3500
  @param filter_master: if True, do not return the master node in the list
3501
      (useful in coordination with secondary_ips where we cannot check our
3502
      node name against the list)
3503
  @type nodegroup: string
3504
  @param nodegroup: If set, only return nodes in this node group
3505

3506
  """
3507
  if cl is None:
3508
    cl = GetClient()
3509

    
3510
  qfilter = []
3511

    
3512
  if nodes:
3513
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3514

    
3515
  if nodegroup is not None:
3516
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3517
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3518

    
3519
  if filter_master:
3520
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3521

    
3522
  if qfilter:
3523
    if len(qfilter) > 1:
3524
      final_filter = [qlang.OP_AND] + qfilter
3525
    else:
3526
      assert len(qfilter) == 1
3527
      final_filter = qfilter[0]
3528
  else:
3529
    final_filter = None
3530

    
3531
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3532

    
3533
  def _IsOffline(row):
3534
    (_, (_, offline), _) = row
3535
    return offline
3536

    
3537
  def _GetName(row):
3538
    ((_, name), _, _) = row
3539
    return name
3540

    
3541
  def _GetSip(row):
3542
    (_, _, (_, sip)) = row
3543
    return sip
3544

    
3545
  (offline, online) = compat.partition(result.data, _IsOffline)
3546

    
3547
  if offline and not nowarn:
3548
    ToStderr("Note: skipping offline node(s): %s" %
3549
             utils.CommaJoin(map(_GetName, offline)))
3550

    
3551
  if secondary_ips:
3552
    fn = _GetSip
3553
  else:
3554
    fn = _GetName
3555

    
3556
  return map(fn, online)
3557

    
3558
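# Illustrative usage sketch for GetOnlineNodes:
#
#   # Secondary IPs of all online nodes except the master, without printing
#   # the "skipping offline node(s)" note:
#   sips = GetOnlineNodes([], secondary_ips=True, filter_master=True,
#                         nowarn=True)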

    
3559
def _ToStream(stream, txt, *args):
3560
  """Write a message to a stream, bypassing the logging system
3561

3562
  @type stream: file object
3563
  @param stream: the file to which we should write
3564
  @type txt: str
3565
  @param txt: the message
3566

3567
  """
3568
  try:
3569
    if args:
3570
      args = tuple(args)
3571
      stream.write(txt % args)
3572
    else:
3573
      stream.write(txt)
3574
    stream.write("\n")
3575
    stream.flush()
3576
  except IOError, err:
3577
    if err.errno == errno.EPIPE:
3578
      # our terminal went away, we'll exit
3579
      sys.exit(constants.EXIT_FAILURE)
3580
    else:
3581
      raise
3582

    
3583

    
3584
def ToStdout(txt, *args):
3585
  """Write a message to stdout only, bypassing the logging system
3586

3587
  This is just a wrapper over _ToStream.
3588

3589
  @type txt: str
3590
  @param txt: the message
3591

3592
  """
3593
  _ToStream(sys.stdout, txt, *args)
3594

    
3595

    
3596
def ToStderr(txt, *args):
3597
  """Write a message to stderr only, bypassing the logging system
3598

3599
  This is just a wrapper over _ToStream.
3600

3601
  @type txt: str
3602
  @param txt: the message
3603

3604
  """
3605
  _ToStream(sys.stderr, txt, *args)
3606

    
3607

    
3608
class JobExecutor(object):
3609
  """Class which manages the submission and execution of multiple jobs.
3610

3611
  Note that instances of this class should not be reused between
3612
  GetResults() calls.
3613

3614
  """
3615
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3616
    self.queue = []
3617
    if cl is None:
3618
      cl = GetClient()
3619
    self.cl = cl
3620
    self.verbose = verbose
3621
    self.jobs = []
3622
    self.opts = opts
3623
    self.feedback_fn = feedback_fn
3624
    self._counter = itertools.count()
3625

    
3626
  @staticmethod
3627
  def _IfName(name, fmt):
3628
    """Helper function for formatting name.
3629

3630
    """
3631
    if name:
3632
      return fmt % name
3633

    
3634
    return ""
3635

    
3636
  def QueueJob(self, name, *ops):
3637
    """Record a job for later submit.
3638

3639
    @type name: string
3640
    @param name: a description of the job, will be used in WaitJobSet
3641

3642
    """
3643
    SetGenericOpcodeOpts(ops, self.opts)
3644
    self.queue.append((self._counter.next(), name, ops))
3645

    
3646
  def AddJobId(self, name, status, job_id):
3647
    """Adds a job ID to the internal queue.
3648

3649
    """
3650
    self.jobs.append((self._counter.next(), status, job_id, name))
3651

    
3652
  def SubmitPending(self, each=False):
3653
    """Submit all pending jobs.
3654

3655
    """
3656
    if each:
3657
      results = []
3658
      for (_, _, ops) in self.queue:
3659
        # SubmitJob will remove the success status, but raise an exception if
3660
        # the submission fails, so we'll notice that anyway.
3661
        results.append([True, self.cl.SubmitJob(ops)[0]])
3662
    else:
3663
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3664
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3665
      self.jobs.append((idx, status, data, name))
3666

    
3667
  def _ChooseJob(self):
3668
    """Choose a non-waiting/queued job to poll next.
3669

3670
    """
3671
    assert self.jobs, "_ChooseJob called with empty job list"
3672

    
3673
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3674
                               ["status"])
3675
    assert result
3676

    
3677
    for job_data, status in zip(self.jobs, result):
3678
      if (isinstance(status, list) and status and
3679
          status[0] in (constants.JOB_STATUS_QUEUED,
3680
                        constants.JOB_STATUS_WAITING,
3681
                        constants.JOB_STATUS_CANCELING)):
3682
        # job is still present and waiting
3683
        continue
3684
      # good candidate found (either running job or lost job)
3685
      self.jobs.remove(job_data)
3686
      return job_data
3687

    
3688
    # no job found
3689
    return self.jobs.pop(0)
3690

    
3691
  def GetResults(self):
3692
    """Wait for and return the results of all jobs.
3693

3694
    @rtype: list
3695
    @return: list of tuples (success, job results), in the same order
3696
        as the submitted jobs; if a job has failed, instead of the result
3697
        there will be the error message
3698

3699
    """
3700
    if not self.jobs:
3701
      self.SubmitPending()
3702
    results = []
3703
    if self.verbose:
3704
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3705
      if ok_jobs:
3706
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3707

    
3708
    # first, remove any non-submitted jobs
3709
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3710
    for idx, _, jid, name in failures:
3711
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3712
      results.append((idx, False, jid))
3713

    
3714
    while self.jobs:
3715
      (idx, _, jid, name) = self._ChooseJob()
3716
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3717
      try:
3718
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3719
        success = True
3720
      except errors.JobLost, err:
3721
        _, job_result = FormatError(err)
3722
        ToStderr("Job %s%s has been archived, cannot check its result",
3723
                 jid, self._IfName(name, " for %s"))
3724
        success = False
3725
      except (errors.GenericError, luxi.ProtocolError), err:
3726
        _, job_result = FormatError(err)
3727
        success = False
3728
        # the error message will always be shown, verbose or not
3729
        ToStderr("Job %s%s has failed: %s",
3730
                 jid, self._IfName(name, " for %s"), job_result)
3731

    
3732
      results.append((idx, success, job_result))
3733

    
3734
    # sort based on the index, then drop it
3735
    results.sort()
3736
    results = [i[1:] for i in results]
3737

    
3738
    return results
3739

    
3740
  def WaitOrShow(self, wait):
3741
    """Wait for job results or only print the job IDs.
3742

3743
    @type wait: boolean
3744
    @param wait: whether to wait or not
3745

3746
    """
3747
    if wait:
3748
      return self.GetResults()
3749
    else:
3750
      if not self.jobs:
3751
        self.SubmitPending()
3752
      for _, status, result, name in self.jobs:
3753
        if status:
3754
          ToStdout("%s: %s", result, name)
3755
        else:
3756
          ToStderr("Failure for %s: %s", name, result)
3757
      return [row[1:3] for row in self.jobs]
3758

    
3759
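# Illustrative usage sketch for JobExecutor; the opcode and instance names
# used here are assumptions:
#
#   jex = JobExecutor(cl=cl, opts=opts)
#   for name in ["inst1.example.com", "inst2.example.com"]:
#     jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   results = jex.GetResults()  # [(success, result_or_error), ...] per job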

    
3760
def FormatParamsDictInfo(param_dict, actual):
3761
  """Formats a parameter dictionary.
3762

3763
  @type param_dict: dict
3764
  @param param_dict: the explicitly set (own) parameters
3765
  @type actual: dict
3766
  @param actual: the current parameter set (including defaults)
3767
  @rtype: dict
3768
  @return: dictionary where the value of each parameter is either a fully
3769
      formatted string or a dictionary containing formatted strings
3770

3771
  """
3772
  ret = {}
3773
  for (key, data) in actual.items():
3774
    if isinstance(data, dict) and data:
3775
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3776
    else:
3777
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3778
  return ret
3779

    
3780

    
3781
def _FormatListInfoDefault(data, def_data):
3782
  if data is not None:
3783
    ret = utils.CommaJoin(data)
3784
  else:
3785
    ret = "default (%s)" % utils.CommaJoin(def_data)
3786
  return ret
3787

    
3788

    
3789
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3790
  """Formats an instance policy.
3791

3792
  @type custom_ipolicy: dict
3793
  @param custom_ipolicy: the explicitly set (own) policy
3794
  @type eff_ipolicy: dict
3795
  @param eff_ipolicy: effective policy (including defaults); ignored for
3796
      cluster
3797
  @type iscluster: bool
3798
  @param iscluster: the policy is at cluster level
3799
  @rtype: list of pairs
3800
  @return: formatted data, suitable for L{PrintGenericInfo}
3801

3802
  """
3803
  if iscluster:
3804
    eff_ipolicy = custom_ipolicy
3805

    
3806
  minmax_out = []
3807
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3808
  if custom_minmax:
3809
    for (k, minmax) in enumerate(custom_minmax):
3810
      minmax_out.append([
3811
        ("%s/%s" % (key, k),
3812
         FormatParamsDictInfo(minmax[key], minmax[key]))
3813
        for key in constants.ISPECS_MINMAX_KEYS
3814
        ])
3815
  else:
3816
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3817
      minmax_out.append([
3818
        ("%s/%s" % (key, k),
3819
         FormatParamsDictInfo({}, minmax[key]))
3820
        for key in constants.ISPECS_MINMAX_KEYS
3821
        ])
3822
  ret = [("bounds specs", minmax_out)]
3823

    
3824
  if iscluster:
3825
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3826
    ret.append(
3827
      (constants.ISPECS_STD,
3828
       FormatParamsDictInfo(stdspecs, stdspecs))
3829
      )
3830

    
3831
  ret.append(
3832
    ("allowed disk templates",
3833
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3834
                            eff_ipolicy[constants.IPOLICY_DTS]))
3835
    )
3836
  ret.extend([
3837
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3838
    for key in constants.IPOLICY_PARAMETERS
3839
    ])
3840
  return ret
3841

    
3842

    
3843
def _PrintSpecsParameters(buf, specs):
3844
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3845
  buf.write(",".join(values))
3846

    
3847

    
3848
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3849
  """Print the command option used to generate the given instance policy.
3850

3851
  Currently only the parts dealing with specs are supported.
3852

3853
  @type buf: StringIO
3854
  @param buf: stream to write into
3855
  @type ipolicy: dict
3856
  @param ipolicy: instance policy
3857
  @type isgroup: bool
3858
  @param isgroup: whether the policy is at group level
3859

3860
  """
3861
  if not isgroup:
3862
    stdspecs = ipolicy.get("std")
3863
    if stdspecs:
3864
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3865
      _PrintSpecsParameters(buf, stdspecs)
3866
  minmaxes = ipolicy.get("minmax", [])
3867
  first = True
3868
  for minmax in minmaxes:
3869
    minspecs = minmax.get("min")
3870
    maxspecs = minmax.get("max")
3871
    if minspecs and maxspecs:
3872
      if first:
3873
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3874
        first = False
3875
      else:
3876
        buf.write("//")
3877
      buf.write("min:")
3878
      _PrintSpecsParameters(buf, minspecs)
3879
      buf.write("/max:")
3880
      _PrintSpecsParameters(buf, maxspecs)
3881

    
3882

    
3883
def ConfirmOperation(names, list_type, text, extra=""):
3884
  """Ask the user to confirm an operation on a list of list_type.
3885

3886
  This function is used to request confirmation for doing an operation
3887
  on a given list of list_type.
3888

3889
  @type names: list
3890
  @param names: the list of names that we display when
3891
      we ask for confirmation
3892
  @type list_type: str
3893
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3894
  @type text: str
3895
  @param text: the operation that the user should confirm
3896
  @rtype: boolean
3897
  @return: True or False depending on user's confirmation.
3898

3899
  """
3900
  count = len(names)
3901
  msg = ("The %s will operate on %d %s.\n%s"
3902
         "Do you want to continue?" % (text, count, list_type, extra))
3903
  affected = (("\nAffected %s:\n" % list_type) +
3904
              "\n".join(["  %s" % name for name in names]))
3905

    
3906
  choices = [("y", True, "Yes, execute the %s" % text),
3907
             ("n", False, "No, abort the %s" % text)]
3908

    
3909
  if count > 20:
3910
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3911
    question = msg
3912
  else:
3913
    question = msg + affected
3914

    
3915
  choice = AskUser(question, choices)
3916
  if choice == "v":
3917
    choices.pop(1)
3918
    choice = AskUser(msg + affected, choices)
3919
  return choice
3920

    
3921
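# Illustrative usage sketch for ConfirmOperation:
#
#   names = ["inst1.example.com", "inst2.example.com"]
#   if not ConfirmOperation(names, "instances", "shutdown"):
#     return constants.EXIT_FAILURE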

    
3922
def _MaybeParseUnit(elements):
3923
  """Parses and returns an array of potential values with units.
3924

3925
  """
3926
  parsed = {}
3927
  for k, v in elements.items():
3928
    if v == constants.VALUE_DEFAULT:
3929
      parsed[k] = v
3930
    else:
3931
      parsed[k] = utils.ParseUnit(v)
3932
  return parsed
3933

    
3934

    
3935
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3936
                             ispecs_disk_count, ispecs_disk_size,
3937
                             ispecs_nic_count, group_ipolicy, fill_all):
3938
  try:
3939
    if ispecs_mem_size:
3940
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3941
    if ispecs_disk_size:
3942
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3943
  except (TypeError, ValueError, errors.UnitParseError), err:
3944
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3945
                               " in policy: %s" %
3946
                               (ispecs_disk_size, ispecs_mem_size, err),
3947
                               errors.ECODE_INVAL)
3948

    
3949
  # prepare ipolicy dict
3950
  ispecs_transposed = {
3951
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3952
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3953
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3954
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3955
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3956
    }
3957

    
3958
  # first, check that the values given are correct
3959
  if group_ipolicy:
3960
    forced_type = TISPECS_GROUP_TYPES
3961
  else:
3962
    forced_type = TISPECS_CLUSTER_TYPES
3963
  for specs in ispecs_transposed.values():
3964
    assert type(specs) is dict
3965
    utils.ForceDictType(specs, forced_type)
3966

    
3967
  # then transpose
3968
  ispecs = {
3969
    constants.ISPECS_MIN: {},
3970
    constants.ISPECS_MAX: {},
3971
    constants.ISPECS_STD: {},
3972
    }
3973
  for (name, specs) in ispecs_transposed.iteritems():
3974
    assert name in constants.ISPECS_PARAMETERS
3975
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3976
      assert key in ispecs
3977
      ispecs[key][name] = val
3978
  minmax_out = {}
3979
  for key in constants.ISPECS_MINMAX_KEYS:
3980
    if fill_all:
3981
      minmax_out[key] = \
3982
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3983
    else:
3984
      minmax_out[key] = ispecs[key]
3985
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3986
  if fill_all:
3987
    ipolicy[constants.ISPECS_STD] = \
3988
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3989
                         ispecs[constants.ISPECS_STD])
3990
  else:
3991
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3992

    
3993

    
3994
def _ParseSpecUnit(spec, keyname):
3995
  ret = spec.copy()
3996
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3997
    if k in ret:
3998
      try:
3999
        ret[k] = utils.ParseUnit(ret[k])
4000
      except (TypeError, ValueError, errors.UnitParseError), err:
4001
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
4002
                                    " specs: %s" % (k, ret[k], keyname, err)),
4003
                                   errors.ECODE_INVAL)
4004
  return ret
4005

    
4006

    
4007
def _ParseISpec(spec, keyname, required):
4008
  ret = _ParseSpecUnit(spec, keyname)
4009
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
4010
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
4011
  if required and missing:
4012
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
4013
                               (keyname, utils.CommaJoin(missing)),
4014
                               errors.ECODE_INVAL)
4015
  return ret
4016

    
4017

    
4018
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4019
  ret = None
4020
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
4021
      len(minmax_ispecs[0]) == 1):
4022
    for (key, spec) in minmax_ispecs[0].items():
4023
      # This loop is executed exactly once
4024
      if key in allowed_values and not spec:
4025
        ret = key
4026
  return ret
4027

    
4028

    
4029
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4030
                            group_ipolicy, allowed_values):
4031
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4032
  if found_allowed is not None:
4033
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4034
  elif minmax_ispecs is not None:
4035
    minmax_out = []
4036
    for mmpair in minmax_ispecs:
4037
      mmpair_out = {}
4038
      for (key, spec) in mmpair.items():
4039
        if key not in constants.ISPECS_MINMAX_KEYS:
4040
          msg = "Invalid key in bounds instance specifications: %s" % key
4041
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4042
        mmpair_out[key] = _ParseISpec(spec, key, True)
4043
      minmax_out.append(mmpair_out)
4044
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4045
  if std_ispecs is not None:
4046
    assert not group_ipolicy # This is not an option for gnt-group
4047
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4048

    
4049

    
4050
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4051
                          ispecs_cpu_count=None,
4052
                          ispecs_disk_count=None,
4053
                          ispecs_disk_size=None,
4054
                          ispecs_nic_count=None,
4055
                          minmax_ispecs=None,
4056
                          std_ispecs=None,
4057
                          ipolicy_disk_templates=None,
4058
                          ipolicy_vcpu_ratio=None,
4059
                          ipolicy_spindle_ratio=None,
4060
                          group_ipolicy=False,
4061
                          allowed_values=None,
4062
                          fill_all=False):
4063
  """Creation of instance policy based on command line options.
4064

4065
  @param fill_all: whether for cluster policies we should ensure that
4066
    all values are filled
4067

4068
  """
4069
  assert not (fill_all and allowed_values)
4070

    
4071
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4072
                 ispecs_disk_size or ispecs_nic_count)
4073
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4074
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4075
                               " together with any --ipolicy-xxx-specs option",
4076
                               errors.ECODE_INVAL)
4077

    
4078
  ipolicy_out = objects.MakeEmptyIPolicy()
4079
  if split_specs:
4080
    assert fill_all
4081
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4082
                             ispecs_disk_count, ispecs_disk_size,
4083
                             ispecs_nic_count, group_ipolicy, fill_all)
4084
  elif (minmax_ispecs is not None or std_ispecs is not None):
4085
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4086
                            group_ipolicy, allowed_values)
4087

    
4088
  if ipolicy_disk_templates is not None:
4089
    if allowed_values and ipolicy_disk_templates in allowed_values:
4090
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4091
    else:
4092
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4093
  if ipolicy_vcpu_ratio is not None:
4094
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4095
  if ipolicy_spindle_ratio is not None:
4096
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4097

    
4098
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4099

    
4100
  if not group_ipolicy and fill_all:
4101
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4102

    
4103
  return ipolicy_out
4104

    
4105
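# Illustrative sketch of building a cluster-level policy from the split
# --specs-* style options; the values are assumptions, not the shipped
# defaults. All five spec groups are given because this code path expects a
# dictionary for each of them:
#
#   ipolicy = CreateIPolicyFromOpts(
#     ispecs_mem_size={"min": "1G", "max": "32G", "std": "4G"},
#     ispecs_cpu_count={"min": 1, "max": 8, "std": 1},
#     ispecs_disk_count={"min": 1, "max": 16, "std": 1},
#     ispecs_disk_size={"min": "1G", "max": "1T", "std": "10G"},
#     ispecs_nic_count={"min": 1, "max": 8, "std": 1},
#     ipolicy_vcpu_ratio=4.0,
#     fill_all=True)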

    
4106
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4107
  """Formatting core of L{PrintGenericInfo}.
4108

4109
  @param buf: (StringIO) stream to accumulate the result into
4110
  @param data: data to format
4111
  @type level: int
4112
  @param level: depth in the data hierarchy, used for indenting
4113
  @type afterkey: bool
4114
  @param afterkey: True when we are in the middle of a line after a key (used
4115
      to properly add newlines or indentation)
4116

4117
  """
4118
  baseind = "  "
4119
  if isinstance(data, dict):
4120
    if not data:
4121
      buf.write("\n")
4122
    else:
4123
      if afterkey:
4124
        buf.write("\n")
4125
        doindent = True
4126
      else:
4127
        doindent = False
4128
      for key in sorted(data):
4129
        if doindent:
4130
          buf.write(baseind * level)
4131
        else:
4132
          doindent = True
4133
        buf.write(key)
4134
        buf.write(": ")
4135
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4136
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4137
    # list of tuples (an ordered dictionary)
4138
    if afterkey:
4139
      buf.write("\n")
4140
      doindent = True
4141
    else:
4142
      doindent = False
4143
    for (key, val) in data:
4144
      if doindent:
4145
        buf.write(baseind * level)
4146
      else:
4147
        doindent = True
4148
      buf.write(key)
4149
      buf.write(": ")
4150
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4151
  elif isinstance(data, list):
4152
    if not data:
4153
      buf.write("\n")
4154
    else:
4155
      if afterkey:
4156
        buf.write("\n")
4157
        doindent = True
4158
      else:
4159
        doindent = False
4160
      for item in data:
4161
        if doindent:
4162
          buf.write(baseind * level)
4163
        else:
4164
          doindent = True
4165
        buf.write("-")
4166
        buf.write(baseind[1:])
4167
        _SerializeGenericInfo(buf, item, level + 1)
4168
  else:
4169
    # This branch should be only taken for strings, but it's practically
4170
    # impossible to guarantee that no other types are produced somewhere
4171
    buf.write(str(data))
4172
    buf.write("\n")
4173

    
4174

    
4175
def PrintGenericInfo(data):
4176
  """Print information formatted according to the hierarchy.
4177

4178
  The output is a valid YAML string.
4179

4180
  @param data: the data to print. It's a hierarchical structure whose elements
4181
      can be:
4182
        - dictionaries, where keys are strings and values are of any of the
4183
          types listed here
4184
        - lists of pairs (key, value), where key is a string and value is of
4185
          any of the types listed here; it's a way to encode ordered
4186
          dictionaries
4187
        - lists of any of the types listed here
4188
        - strings
4189

4190
  """
4191
  buf = StringIO()
4192
  _SerializeGenericInfo(buf, data, 0)
4193
  ToStdout(buf.getvalue().rstrip("\n"))
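

# Illustrative example for PrintGenericInfo (hypothetical data); pairs encode
# ordered dictionaries, plain lists become YAML sequences:
#
#   PrintGenericInfo([
#     ("name", "instance1.example.com"),
#     ("disks", ["disk/0", "disk/1"]),
#     ("nics", [[("mac", "aa:bb:cc:dd:ee:ff"), ("ip", "192.0.2.10")]]),
#     ])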