Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 653bc0f1

History | View | Annotate | Download (138.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
import shlex
33
from cStringIO import StringIO
34

    
35
from ganeti import utils
36
from ganeti import errors
37
from ganeti import constants
38
from ganeti import opcodes
39
import ganeti.rpc.errors as rpcerr
40
import ganeti.rpc.node as rpc
41
from ganeti import ssh
42
from ganeti import compat
43
from ganeti import netutils
44
from ganeti import qlang
45
from ganeti import objects
46
from ganeti import pathutils
47

    
48
from ganeti.runtime import (GetClient)
49

    
50
from optparse import (OptionParser, TitledHelpFormatter,
51
                      Option, OptionValueError)
52

    
53

    
54
__all__ = [
55
  # Command line options
56
  "ABSOLUTE_OPT",
57
  "ADD_UIDS_OPT",
58
  "ADD_RESERVED_IPS_OPT",
59
  "ALLOCATABLE_OPT",
60
  "ALLOC_POLICY_OPT",
61
  "ALL_OPT",
62
  "ALLOW_FAILOVER_OPT",
63
  "AUTO_PROMOTE_OPT",
64
  "AUTO_REPLACE_OPT",
65
  "BACKEND_OPT",
66
  "BLK_OS_OPT",
67
  "CAPAB_MASTER_OPT",
68
  "CAPAB_VM_OPT",
69
  "CLEANUP_OPT",
70
  "CLUSTER_DOMAIN_SECRET_OPT",
71
  "CONFIRM_OPT",
72
  "CP_SIZE_OPT",
73
  "DEBUG_OPT",
74
  "DEBUG_SIMERR_OPT",
75
  "DISKIDX_OPT",
76
  "DISK_OPT",
77
  "DISK_PARAMS_OPT",
78
  "DISK_TEMPLATE_OPT",
79
  "DRAINED_OPT",
80
  "DRY_RUN_OPT",
81
  "DRBD_HELPER_OPT",
82
  "DST_NODE_OPT",
83
  "EARLY_RELEASE_OPT",
84
  "ENABLED_HV_OPT",
85
  "ENABLED_DISK_TEMPLATES_OPT",
86
  "ERROR_CODES_OPT",
87
  "FAILURE_ONLY_OPT",
88
  "FIELDS_OPT",
89
  "FILESTORE_DIR_OPT",
90
  "FILESTORE_DRIVER_OPT",
91
  "FORCE_FILTER_OPT",
92
  "FORCE_OPT",
93
  "FORCE_VARIANT_OPT",
94
  "GATEWAY_OPT",
95
  "GATEWAY6_OPT",
96
  "GLOBAL_FILEDIR_OPT",
97
  "HID_OS_OPT",
98
  "GLOBAL_GLUSTER_FILEDIR_OPT",
99
  "GLOBAL_SHARED_FILEDIR_OPT",
100
  "HOTPLUG_OPT",
101
  "HOTPLUG_IF_POSSIBLE_OPT",
102
  "HVLIST_OPT",
103
  "HVOPTS_OPT",
104
  "HYPERVISOR_OPT",
105
  "IALLOCATOR_OPT",
106
  "DEFAULT_IALLOCATOR_OPT",
107
  "DEFAULT_IALLOCATOR_PARAMS_OPT",
108
  "IDENTIFY_DEFAULTS_OPT",
109
  "IGNORE_CONSIST_OPT",
110
  "IGNORE_ERRORS_OPT",
111
  "IGNORE_FAILURES_OPT",
112
  "IGNORE_OFFLINE_OPT",
113
  "IGNORE_REMOVE_FAILURES_OPT",
114
  "IGNORE_SECONDARIES_OPT",
115
  "IGNORE_SIZE_OPT",
116
  "INCLUDEDEFAULTS_OPT",
117
  "INTERVAL_OPT",
118
  "MAC_PREFIX_OPT",
119
  "MAINTAIN_NODE_HEALTH_OPT",
120
  "MASTER_NETDEV_OPT",
121
  "MASTER_NETMASK_OPT",
122
  "MC_OPT",
123
  "MIGRATION_MODE_OPT",
124
  "MODIFY_ETCHOSTS_OPT",
125
  "NET_OPT",
126
  "NETWORK_OPT",
127
  "NETWORK6_OPT",
128
  "NEW_CLUSTER_CERT_OPT",
129
  "NEW_NODE_CERT_OPT",
130
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
131
  "NEW_CONFD_HMAC_KEY_OPT",
132
  "NEW_RAPI_CERT_OPT",
133
  "NEW_PRIMARY_OPT",
134
  "NEW_SECONDARY_OPT",
135
  "NEW_SPICE_CERT_OPT",
136
  "NIC_PARAMS_OPT",
137
  "NOCONFLICTSCHECK_OPT",
138
  "NODE_FORCE_JOIN_OPT",
139
  "NODE_LIST_OPT",
140
  "NODE_PLACEMENT_OPT",
141
  "NODEGROUP_OPT",
142
  "NODE_PARAMS_OPT",
143
  "NODE_POWERED_OPT",
144
  "NOHDR_OPT",
145
  "NOIPCHECK_OPT",
146
  "NO_INSTALL_OPT",
147
  "NONAMECHECK_OPT",
148
  "NOMODIFY_ETCHOSTS_OPT",
149
  "NOMODIFY_SSH_SETUP_OPT",
150
  "NONICS_OPT",
151
  "NONLIVE_OPT",
152
  "NONPLUS1_OPT",
153
  "NORUNTIME_CHGS_OPT",
154
  "NOSHUTDOWN_OPT",
155
  "NOSTART_OPT",
156
  "NOSSH_KEYCHECK_OPT",
157
  "NOVOTING_OPT",
158
  "NO_REMEMBER_OPT",
159
  "NWSYNC_OPT",
160
  "OFFLINE_INST_OPT",
161
  "ONLINE_INST_OPT",
162
  "ON_PRIMARY_OPT",
163
  "ON_SECONDARY_OPT",
164
  "OFFLINE_OPT",
165
  "OSPARAMS_OPT",
166
  "OS_OPT",
167
  "OS_SIZE_OPT",
168
  "OOB_TIMEOUT_OPT",
169
  "POWER_DELAY_OPT",
170
  "PREALLOC_WIPE_DISKS_OPT",
171
  "PRIMARY_IP_VERSION_OPT",
172
  "PRIMARY_ONLY_OPT",
173
  "PRINT_JOBID_OPT",
174
  "PRIORITY_OPT",
175
  "RAPI_CERT_OPT",
176
  "READD_OPT",
177
  "REASON_OPT",
178
  "REBOOT_TYPE_OPT",
179
  "REMOVE_INSTANCE_OPT",
180
  "REMOVE_RESERVED_IPS_OPT",
181
  "REMOVE_UIDS_OPT",
182
  "RESERVED_LVS_OPT",
183
  "RQL_OPT",
184
  "RUNTIME_MEM_OPT",
185
  "ROMAN_OPT",
186
  "SECONDARY_IP_OPT",
187
  "SECONDARY_ONLY_OPT",
188
  "SELECT_OS_OPT",
189
  "SEP_OPT",
190
  "SHOWCMD_OPT",
191
  "SHOW_MACHINE_OPT",
192
  "COMPRESS_OPT",
193
  "SHUTDOWN_TIMEOUT_OPT",
194
  "SINGLE_NODE_OPT",
195
  "SPECS_CPU_COUNT_OPT",
196
  "SPECS_DISK_COUNT_OPT",
197
  "SPECS_DISK_SIZE_OPT",
198
  "SPECS_MEM_SIZE_OPT",
199
  "SPECS_NIC_COUNT_OPT",
200
  "SPLIT_ISPECS_OPTS",
201
  "IPOLICY_STD_SPECS_OPT",
202
  "IPOLICY_DISK_TEMPLATES",
203
  "IPOLICY_VCPU_RATIO",
204
  "SPICE_CACERT_OPT",
205
  "SPICE_CERT_OPT",
206
  "SRC_DIR_OPT",
207
  "SRC_NODE_OPT",
208
  "SUBMIT_OPT",
209
  "SUBMIT_OPTS",
210
  "STARTUP_PAUSED_OPT",
211
  "STATIC_OPT",
212
  "SYNC_OPT",
213
  "TAG_ADD_OPT",
214
  "TAG_SRC_OPT",
215
  "TIMEOUT_OPT",
216
  "TO_GROUP_OPT",
217
  "UIDPOOL_OPT",
218
  "USEUNITS_OPT",
219
  "USE_EXTERNAL_MIP_SCRIPT",
220
  "USE_REPL_NET_OPT",
221
  "VERBOSE_OPT",
222
  "VG_NAME_OPT",
223
  "WFSYNC_OPT",
224
  "YES_DOIT_OPT",
225
  "DISK_STATE_OPT",
226
  "HV_STATE_OPT",
227
  "IGNORE_IPOLICY_OPT",
228
  "INSTANCE_POLICY_OPTS",
229
  # Generic functions for CLI programs
230
  "ConfirmOperation",
231
  "CreateIPolicyFromOpts",
232
  "GenericMain",
233
  "GenericInstanceCreate",
234
  "GenericList",
235
  "GenericListFields",
236
  "GetClient",
237
  "GetOnlineNodes",
238
  "GetNodesSshPorts",
239
  "JobExecutor",
240
  "JobSubmittedException",
241
  "ParseTimespec",
242
  "RunWhileClusterStopped",
243
  "SubmitOpCode",
244
  "SubmitOpCodeToDrainedQueue",
245
  "SubmitOrSend",
246
  "UsesRPC",
247
  # Formatting functions
248
  "ToStderr", "ToStdout",
249
  "FormatError",
250
  "FormatQueryResult",
251
  "FormatParamsDictInfo",
252
  "FormatPolicyInfo",
253
  "PrintIPolicyCommand",
254
  "PrintGenericInfo",
255
  "GenerateTable",
256
  "AskUser",
257
  "FormatTimestamp",
258
  "FormatLogMessage",
259
  # Tags functions
260
  "ListTags",
261
  "AddTags",
262
  "RemoveTags",
263
  # command line options support infrastructure
264
  "ARGS_MANY_INSTANCES",
265
  "ARGS_MANY_NODES",
266
  "ARGS_MANY_GROUPS",
267
  "ARGS_MANY_NETWORKS",
268
  "ARGS_NONE",
269
  "ARGS_ONE_INSTANCE",
270
  "ARGS_ONE_NODE",
271
  "ARGS_ONE_GROUP",
272
  "ARGS_ONE_OS",
273
  "ARGS_ONE_NETWORK",
274
  "ArgChoice",
275
  "ArgCommand",
276
  "ArgFile",
277
  "ArgGroup",
278
  "ArgHost",
279
  "ArgInstance",
280
  "ArgJobId",
281
  "ArgNetwork",
282
  "ArgNode",
283
  "ArgOs",
284
  "ArgExtStorage",
285
  "ArgSuggest",
286
  "ArgUnknown",
287
  "OPT_COMPL_INST_ADD_NODES",
288
  "OPT_COMPL_MANY_NODES",
289
  "OPT_COMPL_ONE_IALLOCATOR",
290
  "OPT_COMPL_ONE_INSTANCE",
291
  "OPT_COMPL_ONE_NODE",
292
  "OPT_COMPL_ONE_NODEGROUP",
293
  "OPT_COMPL_ONE_NETWORK",
294
  "OPT_COMPL_ONE_OS",
295
  "OPT_COMPL_ONE_EXTSTORAGE",
296
  "cli_option",
297
  "FixHvParams",
298
  "SplitNodeOption",
299
  "CalculateOSNames",
300
  "ParseFields",
301
  "COMMON_CREATE_OPTS",
302
  ]
303

    
304
# Prefixes recognized in key=value option lists (see _SplitKeyVal):
# "no_<key>" means <key>=False, "-<key>" means <key>=None
NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
350

    
351

    
352
class _Argument:
  """Base class describing a positional command line argument."""

  def __init__(self, min=0, max=None): # pylint: disable=W0622
    """Store the occurrence bounds; C{max=None} means unlimited."""
    self.min = min
    self.max = max

  def __repr__(self):
    """Return a debugging representation including the bounds."""
    fmt = "<%s min=%s max=%s>"
    return fmt % (self.__class__.__name__, self.min, self.max)
360

    
361

    
362
class ArgSuggest(_Argument):
  """Argument with a list of suggested values.

  The value may be anything; the ones given to the constructor are
  merely offered as completion suggestions.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    """Initialize bounds via the base class and remember the choices."""
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    """Return a debugging representation including the choices."""
    fmt = "<%s min=%s max=%s choices=%r>"
    return fmt % (self.__class__.__name__, self.min, self.max, self.choices)
376

    
377

    
378
class ArgChoice(ArgSuggest):
  """Argument restricted to a fixed set of values.

  Like L{ArgSuggest}, but the value must be one of the choices given to
  the constructor rather than merely suggested.

  """
385

    
386

    
387
class ArgUnknown(_Argument):
  """Argument whose meaning is only determined at runtime.

  """
391

    
392

    
393
class ArgInstance(_Argument):
  """Argument denoting one or more instance names.

  """
397

    
398

    
399
class ArgNode(_Argument):
  """Argument denoting a node name.

  """
403

    
404

    
405
class ArgNetwork(_Argument):
  """Argument denoting a network name.

  """
409

    
410

    
411
class ArgGroup(_Argument):
  """Argument denoting a node group name.

  """
415

    
416

    
417
class ArgJobId(_Argument):
  """Argument denoting a job identifier.

  """
421

    
422

    
423
class ArgFile(_Argument):
  """Argument denoting a file system path.

  """
427

    
428

    
429
class ArgCommand(_Argument):
  """Argument denoting a shell command.

  """
433

    
434

    
435
class ArgHost(_Argument):
  """Argument denoting a host name.

  """
439

    
440

    
441
class ArgOs(_Argument):
  """Argument denoting an OS name.

  """
445

    
446

    
447
class ArgExtStorage(_Argument):
  """Argument denoting an ExtStorage provider name.

  """
451

    
452

    
453
ARGS_NONE = []
454
ARGS_MANY_INSTANCES = [ArgInstance()]
455
ARGS_MANY_NETWORKS = [ArgNetwork()]
456
ARGS_MANY_NODES = [ArgNode()]
457
ARGS_MANY_GROUPS = [ArgGroup()]
458
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
459
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
460
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
461
# TODO
462
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
463
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
464

    
465

    
466
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  # The cluster itself needs no name; everything else takes its name
  # from the first positional argument
  if kind == constants.TAG_CLUSTER:
    return (kind, "")
  named_kinds = (constants.TAG_NODEGROUP,
                 constants.TAG_NODE,
                 constants.TAG_NETWORK,
                 constants.TAG_INSTANCE)
  if kind not in named_kinds:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  if not args:
    raise errors.OpPrereqError("no arguments passed to the command",
                               errors.ECODE_INVAL)
  return (kind, args.pop(0))
489

    
490

    
491
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed options; only the C{tags_source} attribute is read
  @param args: list of tags, extended in place with one stripped tag per
      line read from the source

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
    own_fh = False
  else:
    new_fh = open(fname, "r")
    own_fh = True
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # Only close handles we opened ourselves; previously sys.stdin was
    # closed here as well when fname == "-", breaking any later reads
    # from standard input in the same process.
    if own_fh:
      new_fh.close()
  args.extend(new_data)
518

    
519

    
520
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  # Print the tags sorted, one per line
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
536

    
537

    
538
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
553

    
554

    
555
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
570

    
571

    
572
def check_unit(option, opt, value): # pylint: disable=W0613
573
  """OptParsers custom converter for units.
574

575
  """
576
  try:
577
    return utils.ParseUnit(value)
578
  except errors.UnitParseError, err:
579
    raise OptionValueError("option %s: %s" % (opt, err))
580

    
581

    
582
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if not data:
    return parsed
  for item in utils.UnescapeAndSplit(data, sep=","):
    if "=" in item:
      (key, val) = item.split("=", 1)
    elif parse_prefixes and item.startswith(NO_PREFIX):
      (key, val) = (item[len(NO_PREFIX):], False)
    elif parse_prefixes and item.startswith(UN_PREFIX):
      (key, val) = (item[len(UN_PREFIX):], None)
    elif parse_prefixes:
      (key, val) = (item, True)
    else:
      raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                  (item, opt))
    if key in parsed:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    parsed[key] = val
  return parsed
623

    
624

    
625
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  # A bare identifier without ":" means no parameters were given
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    # "no_<ident>" removes the whole parameter group; any trailing
    # options would be meaningless and are rejected
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    # "-<ident>" resets the group to defaults, except that "-<digit>"
    # is treated as a (negative) index, not as the reset prefix
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
660

    
661

    
662
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  The result is stored as a tuple (ident, {key: val}), so the option can
  be used repeatedly via action=append.

  """
  return _SplitIdentKeyVal(opt, value, True)
670

    
671

    
672
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  The result is stored as a dict {key: val}, with the "no_"/"-" prefixes
  handled as described in L{_SplitKeyVal}.

  """
  return _SplitKeyVal(opt, value, True)
679

    
680

    
681
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of "ident:key=val" sections into a dict."""
  parsed = {}
  for section in value.split("/"):
    if not section:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, section, False)
    if ident in parsed:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, section))
      raise errors.ParameterError(msg)
    parsed[ident] = valdict
  return parsed
693

    
694

    
695
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  # "//" separates top-level groups, "/" separates sections within a group
  return [_SplitListKeyVal(opt, chunk) for chunk in value.split("//")]
706

    
707

    
708
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  normalized = value.lower()
  if normalized in (constants.VALUE_FALSE, "no"):
    return False
  if normalized in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % normalized)
721

    
722

    
723
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # An empty string must yield an empty list, but "".split(",") would
  # give [""] -- hence the explicit check
  if value:
    return utils.UnescapeAndSplit(value)
  return []
733

    
734

    
735
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  lowered = value.lower()
  # The literal "default" string is passed through unchanged
  if lowered == constants.VALUE_DEFAULT:
    return lowered
  return float(lowered)
745

    
746

    
747
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

#: Set of all dynamic-completion markers defined above
OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
770

    
771

    
772
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with the custom value types defined above
  (key=value parsing, units, booleans, lists) and with a
  completion_suggest attribute used for shell completion.

  """
  # extra attribute recognized when declaring options
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # additional values accepted for the "type" keyword
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  # map each custom type to its converter function defined above
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
796

    
797

    
798
# optparse.py sets make_option, so we do it for our own option class, too
799
cli_option = CliOption
800

    
801

    
802
_YORNO = "yes|no"
803

    
804
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
805
                       help="Increase debugging level")
806

    
807
NOHDR_OPT = cli_option("--no-headers", default=False,
808
                       action="store_true", dest="no_headers",
809
                       help="Don't display column headers")
810

    
811
SEP_OPT = cli_option("--separator", default=None,
812
                     action="store", dest="separator",
813
                     help=("Separator between output fields"
814
                           " (defaults to one space)"))
815

    
816
USEUNITS_OPT = cli_option("--units", default=None,
817
                          dest="units", choices=("h", "m", "g", "t"),
818
                          help="Specify units for output (one of h/m/g/t)")
819

    
820
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
821
                        type="string", metavar="FIELDS",
822
                        help="Comma separated list of output fields")
823

    
824
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
825
                       default=False, help="Force the operation")
826

    
827
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
828
                         default=False, help="Do not require confirmation")
829

    
830
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
831
                                  action="store_true", default=False,
832
                                  help=("Ignore offline nodes and do as much"
833
                                        " as possible"))
834

    
835
TAG_ADD_OPT = cli_option("--tags", dest="tags",
836
                         default=None, help="Comma-separated list of instance"
837
                                            " tags")
838

    
839
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
840
                         default=None, help="File with tag names")
841

    
842
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
843
                        default=False, action="store_true",
844
                        help=("Submit the job and return the job ID, but"
845
                              " don't wait for the job to finish"))
846

    
847
PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
848
                             default=False, action="store_true",
849
                             help=("Additionally print the job as first line"
850
                                   " on stdout (for scripting)."))
851

    
852
SYNC_OPT = cli_option("--sync", dest="do_locking",
853
                      default=False, action="store_true",
854
                      help=("Grab locks while doing the queries"
855
                            " in order to ensure more consistent results"))
856

    
857
DRY_RUN_OPT = cli_option("--dry-run", default=False,
858
                         action="store_true",
859
                         help=("Do not execute the operation, just run the"
860
                               " check steps and verify if it could be"
861
                               " executed"))
862

    
863
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
864
                         action="store_true",
865
                         help="Increase the verbosity of the operation")
866

    
867
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
868
                              action="store_true", dest="simulate_errors",
869
                              help="Debugging option that makes the operation"
870
                              " treat most runtime checks as failed")
871

    
872
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
873
                        default=True, action="store_false",
874
                        help="Don't wait for sync (DANGEROUS!)")
875

    
876
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
877
                        default=False, action="store_true",
878
                        help="Wait for disks to sync")
879

    
880
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
881
                             action="store_true", default=False,
882
                             help="Enable offline instance")
883

    
884
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
885
                              action="store_true", default=False,
886
                              help="Disable down instance")
887

    
888
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
889
                               help=("Custom disk setup (%s)" %
890
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
891
                               default=None, metavar="TEMPL",
892
                               choices=list(constants.DISK_TEMPLATES))
893

    
894
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
895
                        help="Do not create any network cards for"
896
                        " the instance")
897

    
898
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
899
                               help="Relative path under default cluster-wide"
900
                               " file storage dir to store file-based disks",
901
                               default=None, metavar="<DIR>")
902

    
903
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
904
                                  help="Driver to use for image files",
905
                                  default=None, metavar="<DRIVER>",
906
                                  choices=list(constants.FILE_DRIVER))
907

    
908
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
909
                            help="Select nodes for the instance automatically"
910
                            " using the <NAME> iallocator plugin",
911
                            default=None, type="string",
912
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
913

    
914
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
915
                                    metavar="<NAME>",
916
                                    help="Set the default instance"
917
                                    " allocator plugin",
918
                                    default=None, type="string",
919
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
920

    
921
DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
922
                                           dest="default_iallocator_params",
923
                                           help="iallocator template"
924
                                           " parameters, in the format"
925
                                           " template:option=value,"
926
                                           " option=value,...",
927
                                           type="keyval",
928
                                           default={})
929

    
930
# Instance OS selection and OS/backend/hypervisor parameter options.
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

# Note: negative-sense flag; stores False into allow_runtime_chgs.
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

# Instance-specs options: each takes key=value pairs where the key is one
# of min, max, std.
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

# The option strings are kept in separate constants so other modules can
# reference the exact flag name (e.g. in error messages).
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

# Single hypervisor selection (scalar dest) vs. repeatable list form below.
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

# Negative-sense flag: --non-live stores False into "live".
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

# Node-destination options; note DST_NODE_OPT and NEW_SECONDARY_OPT share
# the same dest ("dst_node") on purpose.
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

# Node flag options: tri-state booleans (None means "leave unchanged").
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
                     type="int", help="Set the maximal number of jobs to "
                                      "run simultaneously")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

# Cluster-wide storage directory options.
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
  "--gluster-storage-dir",
  dest="gluster_storage_dir",
  help="Specify the default directory (cluster-wide) for mounting Gluster"
  " file systems [%s]" %
  pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
  metavar="GLUSTERDIR",
  default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          default=constants.IEC_NONE,
                          help="The compression mode to use",
                          choices=list(constants.IEC_ALL))

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

# Certificate / key renewal options (gnt-cluster renew-crypto).
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

NEW_NODE_CERT_OPT = cli_option(
  "--new-node-certificates", dest="new_node_cert", default=False,
  action="store_true", help="Generate new node certificates (for all nodes)")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# User-id pool management options.
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")

def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Looks the user-supplied priority name up in C{_PRIONAME_TO_VALUE} and
  stores the resulting value on the parser under the option's C{dest}.

  """
  # value arrives as one of the names in _PRIONAME_TO_VALUE (enforced by
  # the "choices" argument of PRIORITY_OPT below)
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)
1532

    
1533

    
1534
# Uses a callback so that the symbolic name is translated to its numeric
# value at parse time (see _PriorityOptionCb above)
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# Out-of-band command handling
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

# Node evacuation scoping
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# action="append": the option may be given multiple times
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

# Network management options
NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

# Negative flag: giving the option stores False into "conflicts_check"
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                          " hotplug is supported")
1688

    
1689
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]
1741

    
1742

    
1743
class _ShowUsage(Exception):
1744
  """Exception class for L{_ParseArgs}.
1745

1746
  """
1747
  def __init__(self, exit_error):
1748
    """Initializes instances of this class.
1749

1750
    @type exit_error: bool
1751
    @param exit_error: Whether to report failure on exit
1752

1753
    """
1754
    Exception.__init__(self)
1755
    self.exit_error = exit_error
1756

    
1757

    
1758
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when version information should be printed instead of running a
  command (C{--version} was given on the command line).

  """
1762

    
1763

    
1764
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @return: (function, options, args) for the selected command, or
      (None, None, None) if argument verification failed
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  # env_override and aliases must be disjoint from/contained in commands
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # e.g. "gnt-instance list" looks at GNT_INSTANCE_LIST; the variable's
    # content is split shell-style and inserted before the real arguments
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  # stop option parsing at the first positional argument
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1821

    
1822

    
1823
def _FormatUsage(binary, commands):
1824
  """Generates a nice description of all commands.
1825

1826
  @param binary: Script name
1827
  @param commands: Dictionary containing command definitions
1828

1829
  """
1830
  # compute the max line length for cmd + usage
1831
  mlen = min(60, max(map(len, commands)))
1832

    
1833
  yield "Usage: %s {command} [options...] [argument...]" % binary
1834
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1835
  yield ""
1836
  yield "Commands:"
1837

    
1838
  # and format a nice command list
1839
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1840
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1841
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1842
    for line in help_lines:
1843
      yield " %-*s   %s" % (mlen, "", line)
1844

    
1845
  yield ""
1846

    
1847

    
1848
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  @param cmd: command name, only used in error messages
  @param args_def: list of argument definitions with C{min}/C{max} attributes
  @param args: actual positional arguments given by the user
  @rtype: bool
  @return: whether the arguments satisfy the definition (errors are
      reported on stderr)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # accumulate the minimum/maximum counts over all argument definitions;
    # None means "no bound established yet"
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      # only enforce an upper bound if the last argument is bounded
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
1917

    
1918

    
1919
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: the option value, possibly of the form "node:secondary"
  @return: a two-element sequence; the part after the first colon is
      C{None} if no colon is present

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
1927

    
1928

    
1929
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Without variants the bare OS name is the only valid spelling
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
1944

    
1945

    
1946
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  # No user selection: fall back to the defaults unchanged
  if selected is None:
    return default

  # A leading "+" means "append these fields to the defaults"
  if selected.startswith("+"):
    extra_fields = selected[1:].split(",")
    return default + extra_fields

  return selected.split(",")
1962

    
1963

    
1964
# Backwards-compatible alias for the RPC setup/teardown decorator
UsesRPC = rpc.RunWithRPC
1965

    
1966

    
1967
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # if the terminal cannot be opened, the last choice is the default answer
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # open() instead of the deprecated file() builtin (removed in Python 3)
    f = open("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # the last choice is shown bracketed, marking it as the default
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print the description of every choice, then ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
2022

    
2023

    
2024
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
2033

    
2034

    
2035
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the submitted job

  """
  client = GetClient() if cl is None else cl
  return client.SubmitJob(ops)
2051

    
2052

    
2053
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes, forwarding log messages to
  C{report_cbs}, until the job reaches a final status; then queries the
  final result and either returns it or raises an appropriate error.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the job's opresult list on success
  @raise errors.JobLost: if the job can no longer be found
  @raise errors.OpExecError: if the job or any opcode failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE: max(None, serial) relies on Python 2's None ordering;
        # prev_logmsg_serial starts out as None
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: this "elif" pairs with "if log_entries" above: the final-status
    # check only happens on iterations that delivered no new log messages
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the first failed opcode; report a "partial failure" if at least
  # one earlier opcode succeeded
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
2129

    
2130

    
2131
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the actual data access (e.g. via luxi); see
  L{_LuxiJobPollCb}.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    @type job_id: number
    @param job_id: Job ID
    @type fields: list of strings
    @param fields: Job fields to watch for changes
    @param prev_job_info: previously received field values, or None
    @param prev_log_serial: highest log entry serial seen so far, or None

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
2157

    
2158

    
2159
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide where log messages and progress notices go (stdio,
  a feedback function, ...).

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
2184

    
2185

    
2186
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation that forwards to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
2207

    
2208

    
2209
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callback that forwards log messages to a feedback function.

  "Not changed" notifications are deliberately ignored.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with (timestamp, log_type, log_msg)

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
    
2232

    
2233
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callback that prints log messages to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # remember which one-time notices were already printed, so that the
    # "waiting in queue"/"acquiring locks" messages appear at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True

    
2265

    
2266
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: one of the C{constants.ELOG_*} values
  @param log_msg: the message payload
  @return: the safely-encoded message text

  """
  # Non-message entries can carry arbitrary payloads; stringify them first
  if log_type == constants.ELOG_MESSAGE:
    formatted = log_msg
  else:
    formatted = str(log_msg)

  return utils.SafeEncode(formatted)
2274

    
2275

    
2276
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to it (mutually
      exclusive with C{reporter})
  @param reporter: L{JobPollReportCbBase} instance to use for reporting;
      defaults to printing on stdio
  @raise errors.ProgrammerError: if both feedback_fn and reporter are given

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2298

    
2299

    
2300
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use; created on demand if None
  @param feedback_fn: optional function receiving log messages
  @param opts: command line options, used for generic opcode settings
      (debug level, dry-run, priority, reason trail, job-id printing)
  @param reporter: optional L{JobPollReportCbBase} instance
  @return: the result of the job's first (and only) opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  # hasattr guard: opts may be any object (or None is handled above in
  # SetGenericOpcodeOpts), not necessarily carrying print_jobid
  if hasattr(opts, "print_jobid") and opts.print_jobid:
    ToStdout("%d" % job_id)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
2321

    
2322

    
2323
def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  @param op: the opcode to submit as a single-opcode job
  @return: the result of the job's only opcode

  """
  client = GetClient()
  job_id = client.SubmitJobToDrainedQueue([op])
  return PollJob(job_id, cl=client)[0]
2331

    
2332

    
2333
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: if the job was sent without waiting
      (--submit); carries the job ID

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    # structured (non-error) exit path for the client
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2354

    
2355

    
2356
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  # the command name is read from sys.argv[1] below
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
2379

    
2380

    
2381
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # These option lookups do not depend on the opcode, so do them once
  have_dry_run = hasattr(options, "dry_run")
  priority = getattr(options, "priority", None)

  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if have_dry_run:
      opcode.dry_run = options.dry_run
    if priority is not None:
      opcode.priority = priority
    _InitReasonTrail(opcode, options)
2401

    
2402

    
2403
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # Default: generic failure exit code; only specific branches override it
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: branch order matters — the specific errors.* subclasses must be
  # tested before the errors.GenericError catch-all near the end
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish failure to resolve our own name from any other host
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries (details, error-type)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, rpcerr.NoMasterError):
    # Name the daemon by the socket we failed to reach
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, rpcerr.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, rpcerr.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, rpcerr.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: '--submit' raised this to report the job id
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2495

    
2496

    
2497
def GenericMain(commands, override=None, aliases=None,
2498
                env_override=frozenset()):
2499
  """Generic main function for all the gnt-* commands.
2500

2501
  @param commands: a dictionary with a special structure, see the design doc
2502
                   for command line handling.
2503
  @param override: if not None, we expect a dictionary with keys that will
2504
                   override command line options; this can be used to pass
2505
                   options from the scripts to generic functions
2506
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2507
  @param env_override: list of environment names which are allowed to submit
2508
                       default args for commands
2509

2510
  """
2511
  # save the program name and the entire command line for later logging
2512
  if sys.argv:
2513
    binary = os.path.basename(sys.argv[0])
2514
    if not binary:
2515
      binary = sys.argv[0]
2516

    
2517
    if len(sys.argv) >= 2:
2518
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2519
    else:
2520
      logname = binary
2521

    
2522
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2523
  else:
2524
    binary = "<unknown program>"
2525
    cmdline = "<unknown>"
2526

    
2527
  if aliases is None:
2528
    aliases = {}
2529

    
2530
  try:
2531
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2532
                                       env_override)
2533
  except _ShowVersion:
2534
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2535
             constants.RELEASE_VERSION)
2536
    return constants.EXIT_SUCCESS
2537
  except _ShowUsage, err:
2538
    for line in _FormatUsage(binary, commands):
2539
      ToStdout(line)
2540

    
2541
    if err.exit_error:
2542
      return constants.EXIT_FAILURE
2543
    else:
2544
      return constants.EXIT_SUCCESS
2545
  except errors.ParameterError, err:
2546
    result, err_msg = FormatError(err)
2547
    ToStderr(err_msg)
2548
    return 1
2549

    
2550
  if func is None: # parse error
2551
    return 1
2552

    
2553
  if override is not None:
2554
    for key, val in override.iteritems():
2555
      setattr(options, key, val)
2556

    
2557
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2558
                     stderr_logging=True)
2559

    
2560
  logging.info("Command line: %s", cmdline)
2561

    
2562
  try:
2563
    result = func(options, args)
2564
  except (errors.GenericError, rpcerr.ProtocolError,
2565
          JobSubmittedException), err:
2566
    result, err_msg = FormatError(err)
2567
    logging.exception("Error during command processing")
2568
    ToStderr(err_msg)
2569
  except KeyboardInterrupt:
2570
    result = constants.EXIT_FAILURE
2571
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2572
             " might have been submitted and"
2573
             " will continue to run in the background.")
2574
  except IOError, err:
2575
    if err.errno == errno.EPIPE:
2576
      # our terminal went away, we'll exit
2577
      sys.exit(constants.EXIT_FAILURE)
2578
    else:
2579
      raise
2580

    
2581
  return result
2582

    
2583

    
2584
def ParseNicOption(optvalue):
2585
  """Parses the value of the --net option(s).
2586

2587
  """
2588
  try:
2589
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2590
  except (TypeError, ValueError), err:
2591
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2592
                               errors.ECODE_INVAL)
2593

    
2594
  nics = [{}] * nic_max
2595
  for nidx, ndict in optvalue:
2596
    nidx = int(nidx)
2597

    
2598
    if not isinstance(ndict, dict):
2599
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2600
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2601

    
2602
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2603

    
2604
    nics[nidx] = ndict
2605

    
2606
  return nics
2607

    
2608

    
2609
def FixHvParams(hvparams):
  # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  # comma to space because commas cannot be accepted on the command line
  # (they already act as the separator between different hvparams). Still,
  # RAPI should be able to accept commas for backwards compatibility.
  # Therefore, we convert spaces into commas here, and we keep the old
  # parsing logic everywhere else.
  if constants.HV_USB_DEVICES in hvparams:
    hvparams[constants.HV_USB_DEVICES] = \
      hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
  # If usb_devices is absent there is nothing to modify
2622

    
2623

    
2624
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    # opts.hypervisor is a (name, params-dict) pair
    hypervisor, hvparams = opts.hypervisor

  # Determine the NIC list: explicit --net options win, then --no-nics,
  # then the mode-dependent default (one auto NIC for create, none for import)
  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  # Validate and normalize the disk specifications
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      # -s SIZE is shorthand for a single disk 0 of that size
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # NOTE(review): "[{}] * disk_max" makes every defaulted slot share ONE
      # dict object; harmless only as long as nothing mutates those entries
      # downstream — consider "[{} for _ in range(disk_max)]"
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          # Normalize human-readable sizes ("1g", "512m", ...) to mebibytes
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        # Size is determined from the adopted volume, so use a placeholder
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      if constants.IDISK_SPINDLES in ddict:
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])

      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  FixHvParams(hvparams)

  # Mode-specific opcode parameters
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
    compress = constants.IEC_NONE
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
    compress = opts.compress
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                compress=compress,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
2772

    
2773

    
2774
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes, in the same order
      as C{online_nodes}

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    # Map node name -> SSH port (relies on positional correspondence)
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None or the master node name means
      "run locally"
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command exits with a failure

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd),
                            port=self.ssh_ports[node_name])

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    The sequence is: block watcher, stop master daemons, stop daemons on
    all nodes, run C{fn}, then restart daemons (master last) and resume
    the watcher — restart happens even if C{fn} raises.

    @type fn: callable
    @param fn: Function to be called; receives this helper as its first
      argument, followed by C{args}
    @return: the return value of C{fn}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # Report the error through feedback_fn before re-raising, since
          # the caller's terminal may be the only place it is seen
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2873

    
2874

    
2875
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # Talking to the master daemon also guarantees we run on the master node
  master_client = GetClient()
  query_client = GetClient(query=True)

  (cluster_name, master_node) = \
    master_client.QueryConfigValues(["cluster_name", "master_node"])

  nodes = GetOnlineNodes([], cl=query_client)
  ports = GetNodesSshPorts(nodes, query_client)

  # Drop the client references; the master daemon is about to go away
  del master_client
  del query_client

  assert master_node in nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, nodes, ports)
  return helper.Call(fn, *args)
2905

    
2906

    
2907
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  Note: the sublists in C{data} are modified in place — unit fields are
  replaced by their formatted value and every cell is converted to a
  string.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Build one printf-style conversion per column; with a separator all
  # columns are plain "%s", otherwise "%*s"/"%-*s" take a computed width
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum cell width seen per column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape '%' in the separator so the final %-formatting is safe
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: format unit cells, stringify everything, record widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # Non-numeric values in a unit field are passed through as-is
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers also contribute to the column width
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Avoid trailing whitespace: a left-aligned last column needs no padding
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: emit the data rows (None rows become all dashes)
  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
3013

    
3014

    
3015
def _FormatBool(value):
3016
  """Formats a boolean value as a string.
3017

3018
  """
3019
  if value:
3020
    return "Y"
3021
  return "N"
3022

    
3023

    
3024
#: Default formatting for query results; (callback, align right)
#: QFT_UNIT is deliberately absent — it needs the runtime-chosen unit and
#: is handled separately in _GetColumnFormatter
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  # Numbers are right-aligned so digits line up in columns
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
3033

    
3034

    
3035
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Per-field overrides take precedence over everything else
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # The unit is only known at call time, so this entry cannot live in
    # the static default dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  default = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if default is None:
    raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
  return default
3064

    
3065

    
3066
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    @param data: a (status, value) pair as returned by a query

    """
    (status, value) = data

    # Let the caller account for this field's status
    self._status_fn(status)

    if status != constants.RS_NORMAL:
      # Abnormal statuses carry no value; render the status instead
      assert value is None, \
             "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._fn(value)
3101

    
3102

    
3103
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  if status not in constants.RSS_DESCRIPTION:
    raise NotImplementedError("Unknown status %s" % status)
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  return verbose_text if verbose else normal_text
3122

    
3123

    
3124
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @return: a (status, table-lines) tuple where status is one of QR_NORMAL,
    QR_INCOMPLETE or QR_UNKNOWN

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, filled in by _RecordStatus as cells are formatted
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  # Formatting the table also populates "stats" via the closure above
  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
3186

    
3187

    
3188
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [field for field in fdefs if field.kind == constants.QFT_UNKNOWN]
3196

    
3197

    
3198
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: True if at least one unknown field was queried for

  """
  bad = _GetUnknownFields(fdefs)
  if not bad:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(f.name for f in bad))
  return True
3211

    
3212

    
3213
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  # An empty name list means "all items"
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  # Combine the name-based filter with any caller-supplied filter
  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  # The warning above and the formatted status must agree on whether
  # unknown fields were present
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
3283

    
3284

    
3285
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: [name, human-readable kind, title, doc] for one field

  """
  # Fall back to the raw kind when no human-readable name is known
  kind_name = _QFT_NAMES.get(fdef.kind, fdef.kind)
  return [fdef.name, kind_name, fdef.title, fdef.doc]
3298

    
3299

    
3300
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  # One table row per field definition
  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3338

    
3339

    
3340
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # Header text shown for this column
    self.title = title
    # Callable applied to every cell value to render it as text
    self.format = fn
    # Right-align values when True, left-align otherwise
    self.align_right = align_right
3358

    
3359

    
3360
def _GetColFormatString(width, align_right):
3361
  """Returns the format string for a field.
3362

3363
  """
3364
  if align_right:
3365
    sign = ""
3366
  else:
3367
    sign = "-"
3368

    
3369
  return "%%%s%ss" % (sign, width)
3370

    
3371

    
3372
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns; None requests
    whitespace-aligned output
  @rtype: list of strings
  @return: the formatted lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0] * len(columns)

  # Render every cell through its column's formatting function
  for row in rows:
    assert len(row) == len(columns)

    rendered = [col.format(value) for (value, col) in zip(row, columns)]

    if separator is None:
      # Track the widest value seen per column for later alignment
      colwidth = [max(old, len(text))
                  for (old, text) in zip(colwidth, rendered)]

    data.append(rendered)

  if separator is not None:
    # Separator output needs no padding at all
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Zero width for a left-aligned last column avoids trailing spaces
    colwidth[-1] = 0

  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for (col, width) in zip(columns, colwidth))

  return [fmt % tuple(row) for row in data]
3419

    
3420

    
3421
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
  @rtype: string
  @return: the formatted timestamp, or "?" for malformed input

  """
  # Anything that is not a 2-element sequence cannot be a timeval pair
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)

  return "?"
3436

    
3437

    
3438
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds denoted by the specification
  @raise errors.OpPrereqError: if the specification is empty or cannot
      be parsed

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    digits = value[:-1]
    if not digits: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(digits) * multiplier
    except (TypeError, ValueError):
      # keep the full original spec (with suffix) in the message, instead
      # of the confusing suffix-stripped value the old code reported
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
3481

    
3482

    
3483
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient(query=True)

  # Build up the individual filter clauses, then AND them together below
  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # The group may be given either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each field of a result row comes back as a (status, value) pair, in
  # the same order as the requested fields: name, offline, sip

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3559

    
3560

    
3561
def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of tuples
  """
  rows = cl.QueryNodes(names=nodes,
                       fields=["ndp/ssh_port"],
                       use_locking=False)
  # Every row carries exactly one field: the node's SSH port
  return map(lambda row: row[0], rows)
3575

    
3576

    
3577
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
  @param args: if given, C{txt} is used as a format string interpolated
      with these arguments

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise
3600

    
3601

    
3602
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional interpolation arguments for C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
3612

    
3613

    
3614
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional interpolation arguments for C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
3624

    
3625

    
3626
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # Jobs not yet submitted: list of (relative index, name, opcodes)
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # Submitted jobs: list of (relative index, submit status, job id or
    # error message, name)
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter; the relative index restores submission order in
    # GetResults() regardless of completion order
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    Returns C{fmt % name} if a name is given, otherwise an empty string.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: if True, jobs are submitted one by one instead of via a
        single SubmitManyJobs call

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    # Pair each submission result with its queue entry, preserving the
    # relative index assigned at queueing time
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    Removes the chosen job from C{self.jobs} and returns its tuple.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Only look at the first _CHOOSE_BATCH jobs to bound the query size
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, rpcerr.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
3776

    
3777

    
3778
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  formatted = {}
  for (key, eff_value) in actual.items():
    if isinstance(eff_value, dict) and eff_value:
      # Non-empty nested group: recurse with the matching custom sub-dict
      formatted[key] = FormatParamsDictInfo(param_dict.get(key, {}),
                                            eff_value)
      continue
    # Plain value: show the custom value, or mark the effective default
    formatted[key] = str(param_dict.get(key, "default (%s)" % eff_value))
  return formatted
3797

    
3798

    
3799
def _FormatListInfoDefault(data, def_data):
  """Renders a list as comma-joined text, falling back to the default.

  Returns the joined C{data}, or a "default (...)" rendering of
  C{def_data} when C{data} is None.

  """
  if data is None:
    return "default (%s)" % utils.CommaJoin(def_data)
  return utils.CommaJoin(data)
3805

    
3806

    
3807
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # At cluster level the own policy is already fully populated
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      # Custom bounds: pass the same dict as both own and effective set,
      # so every value is shown as explicitly set
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      # No custom bounds: everything is rendered as "default (...)"
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
3859

    
3860

    
3861
def _PrintSpecsParameters(buf, specs):
3862
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3863
  buf.write(",".join(values))
3864

    
3865

    
3866
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    # Standard specs only exist above group level
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)

  first = True
  for minmax in ipolicy.get("minmax", []):
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    # Skip incomplete pairs; only full min/max bounds are printable
    if not (minspecs and maxspecs):
      continue
    if first:
      buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      first = False
    else:
      # Subsequent bound pairs are separated by "//"
      buf.write("//")
    buf.write("min:")
    _PrintSpecsParameters(buf, minspecs)
    buf.write("/max:")
    _PrintSpecsParameters(buf, maxspecs)
3899

    
3900

    
3901
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  prompt = ("The %s will operate on %d %s.\n%s"
            "Do you want to continue?" % (text, count, list_type, extra))
  name_list = (("\nAffected %s:\n" % list_type) +
               "\n".join(["  %s" % n for n in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count <= 20:
    # Short lists are shown directly in the question
    answer = AskUser(prompt + name_list, choices)
  else:
    # For long lists, hide the names behind an extra "view" choice
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    answer = AskUser(prompt, choices)

  if answer == "v":
    choices.pop(1)
    answer = AskUser(prompt + name_list, choices)
  return answer
3938

    
3939

    
3940
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to the "default" placeholder are passed through untouched;
  everything else is run through L{utils.ParseUnit}.

  """
  result = {}
  for (key, raw) in elements.items():
    if raw == constants.VALUE_DEFAULT:
      result[key] = raw
    else:
      result[key] = utils.ParseUnit(raw)
  return result
3951

    
3952

    
3953
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fills C{ipolicy} from the legacy per-parameter ("split") spec options.

  Each C{ispecs_*} argument is a dict keyed by "min"/"max"/"std"; memory
  and disk sizes may carry units and are parsed here.

  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose: from {param: {min/max/std: value}} into
  # {min/max/std: {param: value}}
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      # with fill_all, missing values are completed from the defaults
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
4010

    
4011

    
4012
def _ParseSpecUnit(spec, keyname):
  """Parses the unit-carrying values (disk and memory size) in C{spec}.

  @type spec: dict
  @param spec: instance specs; a parsed copy is returned, the input is
      not modified
  @type keyname: string
  @param keyname: spec name, used only in error messages
  @rtype: dict
  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret
4023

    
4024

    
4025
def _ParseISpec(spec, keyname, required):
  """Parses and type-checks a single instance spec dictionary.

  @type spec: dict
  @param spec: raw instance specs (size values may carry units)
  @type keyname: string
  @param keyname: name of the spec, used in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict
  @raise errors.OpPrereqError: if a required parameter is missing

  """
  parsed = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(parsed, constants.ISPECS_PARAMETER_TYPES)
  if required:
    missing = constants.ISPECS_PARAMETERS - frozenset(parsed.keys())
    if missing:
      raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                                 (keyname, utils.CommaJoin(missing)),
                                 errors.ECODE_INVAL)
  return parsed
4034

    
4035

    
4036
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4037
  ret = None
4038
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
4039
      len(minmax_ispecs[0]) == 1):
4040
    for (key, spec) in minmax_ispecs[0].items():
4041
      # This loop is executed exactly once
4042
      if key in allowed_values and not spec:
4043
        ret = key
4044
  return ret
4045

    
4046

    
4047
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fills C{ipolicy_out} from the new-style min/max and std spec options.

  @type ipolicy_out: dict
  @param ipolicy_out: policy dictionary updated in place
  @raise errors.OpPrereqError: if a bounds key is not "min"/"max" or a
      required spec parameter is missing

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # A bare keyword (e.g. "default") replaces the whole minmax list
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        # Bounds specs must be complete, hence required=True
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4066

    
4067

    
4068
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The legacy --specs-xxx options (the C{ispecs_*} parameters) and the
  newer --ipolicy-xxx-specs options (C{minmax_ispecs}/C{std_ispecs}) are
  mutually exclusive.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled
  @rtype: dict
  @return: the assembled instance policy
  @raise errors.OpPrereqError: on conflicting or invalid options

  """
  assert not (fill_all and allowed_values)

  has_split_specs = bool(ispecs_mem_size or ispecs_cpu_count or
                         ispecs_disk_count or ispecs_disk_size or
                         ispecs_nic_count)
  if has_split_specs and (minmax_ispecs is not None or std_ispecs is not None):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy = objects.MakeEmptyIPolicy()
  if has_split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif minmax_ispecs is not None or std_ispecs is not None:
    _InitISpecsFromFullOpts(ipolicy, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # Keep the keyword as-is instead of expanding it to a list
      ipolicy[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)

  return ipolicy


def _SerializeGenericInfo(buf, data, level, afterkey=False):
4125
  """Formatting core of L{PrintGenericInfo}.
4126

4127
  @param buf: (string) stream to accumulate the result into
4128
  @param data: data to format
4129
  @type level: int
4130
  @param level: depth in the data hierarchy, used for indenting
4131
  @type afterkey: bool
4132
  @param afterkey: True when we are in the middle of a line after a key (used
4133
      to properly add newlines or indentation)
4134

4135
  """
4136
  baseind = "  "
4137
  if isinstance(data, dict):
4138
    if not data:
4139
      buf.write("\n")
4140
    else:
4141
      if afterkey:
4142
        buf.write("\n")
4143
        doindent = True
4144
      else:
4145
        doindent = False
4146
      for key in sorted(data):
4147
        if doindent:
4148
          buf.write(baseind * level)
4149
        else:
4150
          doindent = True
4151
        buf.write(key)
4152
        buf.write(": ")
4153
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4154
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4155
    # list of tuples (an ordered dictionary)
4156
    if afterkey:
4157
      buf.write("\n")
4158
      doindent = True
4159
    else:
4160
      doindent = False
4161
    for (key, val) in data:
4162
      if doindent:
4163
        buf.write(baseind * level)
4164
      else:
4165
        doindent = True
4166
      buf.write(key)
4167
      buf.write(": ")
4168
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4169
  elif isinstance(data, list):
4170
    if not data:
4171
      buf.write("\n")
4172
    else:
4173
      if afterkey:
4174
        buf.write("\n")
4175
        doindent = True
4176
      else:
4177
        doindent = False
4178
      for item in data:
4179
        if doindent:
4180
          buf.write(baseind * level)
4181
        else:
4182
          doindent = True
4183
        buf.write("-")
4184
        buf.write(baseind[1:])
4185
        _SerializeGenericInfo(buf, item, level + 1)
4186
  else:
4187
    # This branch should be only taken for strings, but it's practically
4188
    # impossible to guarantee that no other types are produced somewhere
4189
    buf.write(str(data))
4190
    buf.write("\n")
4191

    
4192

    
4193
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  serialized = StringIO()
  _SerializeGenericInfo(serialized, data, 0)
  # The serializer always terminates with a newline; strip trailing
  # newlines since ToStdout adds its own
  ToStdout(serialized.getvalue().rstrip("\n"))