root / lib / cli.py @ e15a00dc

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported kinds of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type field
  denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported kinds of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type field
  denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  supported kinds of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type field
  denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
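
# Illustrative example (editor's note, not part of the original module): the
# "unit" option type accepts plain MiB values or suffixed sizes, so for an
# option declared with type="unit" one would expect roughly:
#   check_unit(None, "--os-size", "128")  => 128
#   check_unit(None, "--os-size", "4G")   => 4096
# while a malformed value such as "12x" raises OptionValueError.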


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
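
# Illustrative example (editor's note, not part of the original module): with
# parse_prefixes=True the 'no_'/'-' prefixes toggle values, so a call such as
#   _SplitKeyVal("-H", "mem=128,no_acpi,-kernel_path", True)
# would yield {"mem": "128", "acpi": False, "kernel_path": None}, whereas with
# parse_prefixes=False a bare "acpi" entry raises errors.ParameterError.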


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
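
# Illustrative example (editor's note, not part of the original module): for a
# value such as "drbd:resync-rate=100" this helper would return
# ("drbd", {"resync-rate": "100"}), while with parse_prefixes=True an ident of
# "no_drbd" (with no options) returns ("drbd", False), marking group removal.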


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionaries
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
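
# Illustrative example (editor's note, not part of the original module): "//"
# separates top-level entries and "/" separates idents within an entry, so a
# value like "a:x=1,y=2/b:z=3//c:w=4" would parse to
# [{"a": {"x": "1", "y": "2"}, "b": {"z": "3"}}, {"c": {"w": "4"}}].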


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
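
# Illustrative example (editor's note, not part of the original module): the
# custom types registered above are what make declarations such as
#   BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
#                            type="keyval", default={},
#                            help="Backend parameters")
# possible, so that "-B memory=512,vcpus=2" arrives in opts.beparams as the
# dict {"memory": "512", "vcpus": "2"}.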


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

    
1143
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1144
                               help="Specifies the new secondary node",
1145
                               metavar="NODE", default=None,
1146
                               completion_suggest=OPT_COMPL_ONE_NODE)
1147

    
1148
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1149
                             help="Specifies the new primary node",
1150
                             metavar="<node>", default=None,
1151
                             completion_suggest=OPT_COMPL_ONE_NODE)
1152

    
1153
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1154
                            default=False, action="store_true",
1155
                            help="Replace the disk(s) on the primary"
1156
                                 " node (applies only to internally mirrored"
1157
                                 " disk templates, e.g. %s)" %
1158
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1159

    
1160
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1161
                              default=False, action="store_true",
1162
                              help="Replace the disk(s) on the secondary"
1163
                                   " node (applies only to internally mirrored"
1164
                                   " disk templates, e.g. %s)" %
1165
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1166

    
1167
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1168
                              default=False, action="store_true",
1169
                              help="Lock all nodes and auto-promote as needed"
1170
                              " to MC status")
1171

    
1172
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1173
                              default=False, action="store_true",
1174
                              help="Automatically replace faulty disks"
1175
                                   " (applies only to internally mirrored"
1176
                                   " disk templates, e.g. %s)" %
1177
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1178

    
1179
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1180
                             default=False, action="store_true",
1181
                             help="Ignore current recorded size"
1182
                             " (useful for forcing activation when"
1183
                             " the recorded size is wrong)")
1184

    
1185
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1186
                          metavar="<node>",
1187
                          completion_suggest=OPT_COMPL_ONE_NODE)
1188

    
1189
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1190
                         metavar="<dir>")
1191

    
1192
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1193
                              help="Specify the secondary ip for the node",
1194
                              metavar="ADDRESS", default=None)
1195

    
1196
READD_OPT = cli_option("--readd", dest="readd",
1197
                       default=False, action="store_true",
1198
                       help="Readd old node after replacing it")
1199

    
1200
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1201
                                default=True, action="store_false",
1202
                                help="Disable SSH key fingerprint checking")
1203

    
1204
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1205
                                 default=False, action="store_true",
1206
                                 help="Force the joining of a node")
1207

    
1208
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1209
                    type="bool", default=None, metavar=_YORNO,
1210
                    help="Set the master_candidate flag on the node")
1211

    
1212
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1213
                         type="bool", default=None,
1214
                         help=("Set the offline flag on the node"
1215
                               " (cluster does not communicate with offline"
1216
                               " nodes)"))
1217

    
1218
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1219
                         type="bool", default=None,
1220
                         help=("Set the drained flag on the node"
1221
                               " (excluded from allocation operations)"))
1222

    
1223
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1224
                              type="bool", default=None, metavar=_YORNO,
1225
                              help="Set the master_capable flag on the node")
1226

    
1227
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1228
                          type="bool", default=None, metavar=_YORNO,
1229
                          help="Set the vm_capable flag on the node")
1230

    
1231
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1232
                             type="bool", default=None, metavar=_YORNO,
1233
                             help="Set the allocatable flag on a volume")
1234

    
1235
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1236
                               help="Disable support for lvm based instances"
1237
                               " (cluster-wide)",
1238
                               action="store_false", default=True)
1239

    
1240
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1241
                            dest="enabled_hypervisors",
1242
                            help="Comma-separated list of hypervisors",
1243
                            type="string", default=None)
1244

    
1245
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1246
                                        dest="enabled_disk_templates",
1247
                                        help="Comma-separated list of "
1248
                                             "disk templates",
1249
                                        type="string", default=None)
1250

    
1251
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1252
                            type="keyval", default={},
1253
                            help="NIC parameters")
1254

    
1255
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1256
                         dest="candidate_pool_size", type="int",
1257
                         help="Set the candidate pool size")
1258

    
1259
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1260
                         help=("Enables LVM and specifies the volume group"
1261
                               " name (cluster-wide) for disk allocation"
1262
                               " [%s]" % constants.DEFAULT_VG),
1263
                         metavar="VG", default=None)
1264

    
1265
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1266
                          help="Destroy cluster", action="store_true")
1267

    
1268
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1269
                          help="Skip node agreement check (dangerous)",
1270
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))
1365

    
1366
EARLY_RELEASE_OPT = cli_option("--early-release",
1367
                               dest="early_release", default=False,
1368
                               action="store_true",
1369
                               help="Release the locks on the secondary"
1370
                               " node(s) early")
1371

    
1372
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1373
                                  dest="new_cluster_cert",
1374
                                  default=False, action="store_true",
1375
                                  help="Generate a new cluster certificate")
1376

    
1377
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1378
                           default=None,
1379
                           help="File containing new RAPI certificate")
1380

    
1381
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1382
                               default=None, action="store_true",
1383
                               help=("Generate a new self-signed RAPI"
1384
                                     " certificate"))
1385

    
1386
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1387
                            default=None,
1388
                            help="File containing new SPICE certificate")
1389

    
1390
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1391
                              default=None,
1392
                              help="File containing the certificate of the CA"
1393
                              " which signed the SPICE certificate")
1394

    
1395
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1396
                                dest="new_spice_cert", default=None,
1397
                                action="store_true",
1398
                                help=("Generate a new self-signed SPICE"
1399
                                      " certificate"))
1400

    
1401
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1402
                                    dest="new_confd_hmac_key",
1403
                                    default=False, action="store_true",
1404
                                    help=("Create a new HMAC key for %s" %
1405
                                          constants.CONFD))
1406

    
1407
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1408
                                       dest="cluster_domain_secret",
1409
                                       default=None,
1410
                                       help=("Load new new cluster domain"
1411
                                             " secret from file"))
1412

    
1413
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1414
                                           dest="new_cluster_domain_secret",
1415
                                           default=False, action="store_true",
1416
                                           help=("Create a new cluster domain"
1417
                                                 " secret"))
1418

    
1419
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1420
                              dest="use_replication_network",
1421
                              help="Whether to use the replication network"
1422
                              " for talking to the nodes",
1423
                              action="store_true", default=False)
1424

    
1425
MAINTAIN_NODE_HEALTH_OPT = \
1426
    cli_option("--maintain-node-health", dest="maintain_node_health",
1427
               metavar=_YORNO, default=None, type="bool",
1428
               help="Configure the cluster to automatically maintain node"
1429
               " health, by shutting down unknown instances, shutting down"
1430
               " unknown DRBD devices, etc.")
1431

    
1432
IDENTIFY_DEFAULTS_OPT = \
1433
    cli_option("--identify-defaults", dest="identify_defaults",
1434
               default=False, action="store_true",
1435
               help="Identify which saved instance parameters are equal to"
1436
               " the current cluster defaults and set them as such, instead"
1437
               " of marking them as overridden")
1438

    
1439
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1440
                         action="store", dest="uid_pool",
1441
                         help=("A list of user-ids or user-id"
1442
                               " ranges separated by commas"))
1443

    
1444
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1445
                          action="store", dest="add_uids",
1446
                          help=("A list of user-ids or user-id"
1447
                                " ranges separated by commas, to be"
1448
                                " added to the user-id pool"))
1449

    
1450
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1451
                             action="store", dest="remove_uids",
1452
                             help=("A list of user-ids or user-id"
1453
                                   " ranges separated by commas, to be"
1454
                                   " removed from the user-id pool"))
1455

    
1456
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1457
                              action="store", dest="reserved_lvs",
1458
                              help=("A comma-separated list of reserved"
1459
                                    " logical volumes names, that will be"
1460
                                    " ignored by cluster verify"))
1461

    
1462
ROMAN_OPT = cli_option("--roman",
1463
                       dest="roman_integers", default=False,
1464
                       action="store_true",
1465
                       help="Use roman numbers for positive integers")
1466

    
1467
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1468
                             action="store", default=None,
1469
                             help="Specifies usermode helper for DRBD")
1470

    
1471
PRIMARY_IP_VERSION_OPT = \
1472
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1473
               action="store", dest="primary_ip_version",
1474
               metavar="%d|%d" % (constants.IP4_VERSION,
1475
                                  constants.IP6_VERSION),
1476
               help="Cluster-wide IP version for primary IP")
1477

    
1478
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1479
                              action="store_true",
1480
                              help="Show machine name for every line in output")
1481

    
1482
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1483
                              action="store_true",
1484
                              help=("Hide successful results and show failures"
1485
                                    " only (determined by the exit code)"))
1486

    
1487
REASON_OPT = cli_option("--reason", default=None,
1488
                        help="The reason for executing the command")
1489

    
1490

    
1491
def _PriorityOptionCb(option, _, value, parser):
1492
  """Callback for processing C{--priority} option.
1493

1494
  """
1495
  value = _PRIONAME_TO_VALUE[value]
1496

    
1497
  setattr(parser.values, option.dest, value)
1498

    
1499

    
1500
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1501
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1502
                          choices=_PRIONAME_TO_VALUE.keys(),
1503
                          action="callback", type="choice",
1504
                          callback=_PriorityOptionCb,
1505
                          help="Priority for opcode processing")
1506

    
1507
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1508
                        type="bool", default=None, metavar=_YORNO,
1509
                        help="Sets the hidden flag on the OS")
1510

    
1511
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1512
                        type="bool", default=None, metavar=_YORNO,
1513
                        help="Sets the blacklisted flag on the OS")
1514

    
1515
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1516
                                     type="bool", metavar=_YORNO,
1517
                                     dest="prealloc_wipe_disks",
1518
                                     help=("Wipe disks prior to instance"
1519
                                           " creation"))
1520

    
1521
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1522
                             type="keyval", default=None,
1523
                             help="Node parameters")
1524

    
1525
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1526
                              action="store", metavar="POLICY", default=None,
1527
                              help="Allocation policy for the node group")
1528

    
1529
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1530
                              type="bool", metavar=_YORNO,
1531
                              dest="node_powered",
1532
                              help="Specify if the SoR for node is powered")
1533

    
1534
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1535
                             default=constants.OOB_TIMEOUT,
1536
                             help="Maximum time to wait for out-of-band helper")
1537

    
1538
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1539
                             default=constants.OOB_POWER_DELAY,
1540
                             help="Time in seconds to wait between power-ons")
1541

    
1542
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1543
                              action="store_true", default=False,
1544
                              help=("Whether command argument should be treated"
1545
                                    " as filter"))
1546

    
1547
NO_REMEMBER_OPT = cli_option("--no-remember",
1548
                             dest="no_remember",
1549
                             action="store_true", default=False,
1550
                             help="Perform but do not record the change"
1551
                             " in the configuration")
1552

    
1553
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1554
                              default=False, action="store_true",
1555
                              help="Evacuate primary instances only")
1556

    
1557
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1558
                                default=False, action="store_true",
1559
                                help="Evacuate secondary instances only"
1560
                                     " (applies only to internally mirrored"
1561
                                     " disk templates, e.g. %s)" %
1562
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1563

    
1564
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1565
                                action="store_true", default=False,
1566
                                help="Pause instance at startup")
1567

    
1568
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1569
                          help="Destination node group (name or uuid)",
1570
                          default=None, action="append",
1571
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1572

    
1573
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1574
                               action="append", dest="ignore_errors",
1575
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1576
                               help="Error code to be ignored")
1577

    
1578
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1579
                            action="append",
1580
                            help=("Specify disk state information in the"
1581
                                  " format"
1582
                                  " storage_type/identifier:option=value,...;"
1583
                                  " note this is unused for now"),
1584
                            type="identkeyval")
1585

    
1586
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1587
                          action="append",
1588
                          help=("Specify hypervisor state information in the"
1589
                                " format hypervisor:option=value,...;"
1590
                                " note this is unused for now"),
1591
                          type="identkeyval")
1592

    
1593
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1594
                                action="store_true", default=False,
1595
                                help="Ignore instance policy violations")
1596

    
1597
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1598
                             help="Sets the instance's runtime memory,"
1599
                             " ballooning it up or down to the new value",
1600
                             default=None, type="unit", metavar="<size>")
1601

    
1602
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1603
                          action="store_true", default=False,
1604
                          help="Marks the grow as absolute instead of the"
1605
                          " (default) relative mode")
1606

    
1607
NETWORK_OPT = cli_option("--network",
1608
                         action="store", default=None, dest="network",
1609
                         help="IP network in CIDR notation")
1610

    
1611
GATEWAY_OPT = cli_option("--gateway",
1612
                         action="store", default=None, dest="gateway",
1613
                         help="IP address of the router (gateway)")
1614

    
1615
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1616
                                  action="store", default=None,
1617
                                  dest="add_reserved_ips",
1618
                                  help="Comma-separated list of"
1619
                                  " reserved IPs to add")
1620

    
1621
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1622
                                     action="store", default=None,
1623
                                     dest="remove_reserved_ips",
1624
                                     help="Comma-delimited list of"
1625
                                     " reserved IPs to remove")
1626

    
1627
NETWORK6_OPT = cli_option("--network6",
1628
                          action="store", default=None, dest="network6",
1629
                          help="IP network in CIDR notation")
1630

    
1631
GATEWAY6_OPT = cli_option("--gateway6",
1632
                          action="store", default=None, dest="gateway6",
1633
                          help="IP6 address of the router (gateway)")
1634

    
1635
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1636
                                  dest="conflicts_check",
1637
                                  default=True,
1638
                                  action="store_false",
1639
                                  help="Don't check for conflicting IPs")
1640

    
1641
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1642
                                 default=False, action="store_true",
1643
                                 help="Include default values")
1644

    
1645
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1646
                         action="store_true", default=False,
1647
                         help="Hotplug supported devices (NICs and Disks)")
1648

    
1649
#: Options provided by all commands
1650
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1651

    
1652
# options related to asynchronous job handling
1653

    
1654
SUBMIT_OPTS = [
1655
  SUBMIT_OPT,
1656
  PRINT_JOBID_OPT,
1657
  ]
1658

    
1659
# common options for creating instances; the "add" and "import" commands then
1660
# add their own specific ones.
1661
COMMON_CREATE_OPTS = [
1662
  BACKEND_OPT,
1663
  DISK_OPT,
1664
  DISK_TEMPLATE_OPT,
1665
  FILESTORE_DIR_OPT,
1666
  FILESTORE_DRIVER_OPT,
1667
  HYPERVISOR_OPT,
1668
  IALLOCATOR_OPT,
1669
  NET_OPT,
1670
  NODE_PLACEMENT_OPT,
1671
  NOIPCHECK_OPT,
1672
  NOCONFLICTSCHECK_OPT,
1673
  NONAMECHECK_OPT,
1674
  NONICS_OPT,
1675
  NWSYNC_OPT,
1676
  OSPARAMS_OPT,
1677
  OS_SIZE_OPT,
1678
  SUBMIT_OPT,
1679
  PRINT_JOBID_OPT,
1680
  TAG_ADD_OPT,
1681
  DRY_RUN_OPT,
1682
  PRIORITY_OPT,
1683
  ]
1684

    
1685
# common instance policy options
1686
INSTANCE_POLICY_OPTS = [
1687
  IPOLICY_BOUNDS_SPECS_OPT,
1688
  IPOLICY_DISK_TEMPLATES,
1689
  IPOLICY_VCPU_RATIO,
1690
  IPOLICY_SPINDLE_RATIO,
1691
  ]
1692

    
1693
# instance policy split specs options
1694
SPLIT_ISPECS_OPTS = [
1695
  SPECS_CPU_COUNT_OPT,
1696
  SPECS_DISK_COUNT_OPT,
1697
  SPECS_DISK_SIZE_OPT,
1698
  SPECS_MEM_SIZE_OPT,
1699
  SPECS_NIC_COUNT_OPT,
1700
  ]
1701

    
1702

    
1703
class _ShowUsage(Exception):
1704
  """Exception class for L{_ParseArgs}.
1705

1706
  """
1707
  def __init__(self, exit_error):
1708
    """Initializes instances of this class.
1709

1710
    @type exit_error: bool
1711
    @param exit_error: Whether to report failure on exit
1712

1713
    """
1714
    Exception.__init__(self)
1715
    self.exit_error = exit_error
1716

    
1717

    
1718
class _ShowVersion(Exception):
1719
  """Exception class for L{_ParseArgs}.
1720

1721
  """
1722

    
1723

    
1724
def _ParseArgs(binary, argv, commands, aliases, env_override):
1725
  """Parser for the command line arguments.
1726

1727
  This function parses the arguments and returns the function which
1728
  must be executed together with its (modified) arguments.
1729

1730
  @param binary: Script name
1731
  @param argv: Command line arguments
1732
  @param commands: Dictionary containing command definitions
1733
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1734
  @param env_override: list of env variables allowed for default args
1735
  @raise _ShowUsage: If usage description should be shown
1736
  @raise _ShowVersion: If version should be shown
1737

1738
  """
1739
  assert not (env_override - set(commands))
1740
  assert not (set(aliases.keys()) & set(commands.keys()))
1741

    
1742
  if len(argv) > 1:
1743
    cmd = argv[1]
1744
  else:
1745
    # No option or command given
1746
    raise _ShowUsage(exit_error=True)
1747

    
1748
  if cmd == "--version":
1749
    raise _ShowVersion()
1750
  elif cmd == "--help":
1751
    raise _ShowUsage(exit_error=False)
1752
  elif not (cmd in commands or cmd in aliases):
1753
    raise _ShowUsage(exit_error=True)
1754

    
1755
  # get command, unalias it, and look it up in commands
1756
  if cmd in aliases:
1757
    if aliases[cmd] not in commands:
1758
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1759
                                   " command '%s'" % (cmd, aliases[cmd]))
1760

    
1761
    cmd = aliases[cmd]
1762

    
1763
  if cmd in env_override:
1764
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1765
    env_args = os.environ.get(args_env_name)
1766
    if env_args:
1767
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1768

    
1769
  func, args_def, parser_opts, usage, description = commands[cmd]
1770
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1771
                        description=description,
1772
                        formatter=TitledHelpFormatter(),
1773
                        usage="%%prog %s %s" % (cmd, usage))
1774
  parser.disable_interspersed_args()
1775
  options, args = parser.parse_args(args=argv[2:])
1776

    
1777
  if not _CheckArguments(cmd, args_def, args):
1778
    return None, None, None
1779

    
1780
  return func, options, args
1781

    
1782

    
1783
def _FormatUsage(binary, commands):
1784
  """Generates a nice description of all commands.
1785

1786
  @param binary: Script name
1787
  @param commands: Dictionary containing command definitions
1788

1789
  """
1790
  # compute the max line length for cmd + usage
1791
  mlen = min(60, max(map(len, commands)))
1792

    
1793
  yield "Usage: %s {command} [options...] [argument...]" % binary
1794
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1795
  yield ""
1796
  yield "Commands:"
1797

    
1798
  # and format a nice command list
1799
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1800
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1801
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1802
    for line in help_lines:
1803
      yield " %-*s   %s" % (mlen, "", line)
1804

    
1805
  yield ""
1806

    
1807

    
1808
def _CheckArguments(cmd, args_def, args):
1809
  """Verifies the arguments using the argument definition.
1810

1811
  Algorithm:
1812

1813
    1. Abort with error if values specified by user but none expected.
1814

1815
    1. For each argument in definition
1816

1817
      1. Keep running count of minimum number of values (min_count)
1818
      1. Keep running count of maximum number of values (max_count)
1819
      1. If it has an unlimited number of values
1820

1821
        1. Abort with error if it's not the last argument in the definition
1822

1823
    1. If last argument has limited number of values
1824

1825
      1. Abort with error if number of values doesn't match or is too large
1826

1827
    1. Abort with error if user didn't pass enough values (min_count)
1828

1829
  """
1830
  if args and not args_def:
1831
    ToStderr("Error: Command %s expects no arguments", cmd)
1832
    return False
1833

    
1834
  min_count = None
1835
  max_count = None
1836
  check_max = None
1837

    
1838
  last_idx = len(args_def) - 1
1839

    
1840
  for idx, arg in enumerate(args_def):
1841
    if min_count is None:
1842
      min_count = arg.min
1843
    elif arg.min is not None:
1844
      min_count += arg.min
1845

    
1846
    if max_count is None:
1847
      max_count = arg.max
1848
    elif arg.max is not None:
1849
      max_count += arg.max
1850

    
1851
    if idx == last_idx:
1852
      check_max = (arg.max is not None)
1853

    
1854
    elif arg.max is None:
1855
      raise errors.ProgrammerError("Only the last argument can have max=None")
1856

    
1857
  if check_max:
1858
    # Command with exact number of arguments
1859
    if (min_count is not None and max_count is not None and
1860
        min_count == max_count and len(args) != min_count):
1861
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1862
      return False
1863

    
1864
    # Command with limited number of arguments
1865
    if max_count is not None and len(args) > max_count:
1866
      ToStderr("Error: Command %s expects only %d argument(s)",
1867
               cmd, max_count)
1868
      return False
1869

    
1870
  # Command with some required arguments
1871
  if min_count is not None and len(args) < min_count:
1872
    ToStderr("Error: Command %s expects at least %d argument(s)",
1873
             cmd, min_count)
1874
    return False
1875

    
1876
  return True
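# Illustrative sketch (the ArgInstance/ArgUnknown argument classes are
# assumed from earlier in this module and are not shown here): for a
# definition like
#
#   args_def = [ArgInstance(min=1, max=1), ArgUnknown(min=0, max=None)]
#
# min_count ends up as 1 and no maximum is enforced (the last argument has
# max=None), so _CheckArguments accepts one or more values and rejects an
# empty argument list.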
1877

    
1878

    
1879
def SplitNodeOption(value):
1880
  """Splits the value of a --node option.
1881

1882
  """
1883
  if value and ":" in value:
1884
    return value.split(":", 1)
1885
  else:
1886
    return (value, None)
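# Illustrative examples (sketch, not from the upstream sources):
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#     => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     => ("node1.example.com", None)
#
# The two-node form returns a two-element list while the single-node form
# returns a tuple; callers simply unpack the pair, so the difference in type
# is harmless.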
1887

    
1888

    
1889
def CalculateOSNames(os_name, os_variants):
1890
  """Calculates all the names an OS can be called, according to its variants.
1891

1892
  @type os_name: string
1893
  @param os_name: base name of the os
1894
  @type os_variants: list or None
1895
  @param os_variants: list of supported variants
1896
  @rtype: list
1897
  @return: list of valid names
1898

1899
  """
1900
  if os_variants:
1901
    return ["%s+%s" % (os_name, v) for v in os_variants]
1902
  else:
1903
    return [os_name]
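# Illustrative examples (sketch; the OS name and variants are made up):
#
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#     => ["debootstrap+default", "debootstrap+minimal"]
#   CalculateOSNames("debootstrap", None)
#     => ["debootstrap"]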
1904

    
1905

    
1906
def ParseFields(selected, default):
1907
  """Parses the values of "--field"-like options.
1908

1909
  @type selected: string or None
1910
  @param selected: User-selected options
1911
  @type default: list
1912
  @param default: Default fields
1913

1914
  """
1915
  if selected is None:
1916
    return default
1917

    
1918
  if selected.startswith("+"):
1919
    return default + selected[1:].split(",")
1920

    
1921
  return selected.split(",")
1922

    
1923

    
1924
UsesRPC = rpc.RunWithRPC
1925

    
1926

    
1927
def AskUser(text, choices=None):
1928
  """Ask the user a question.
1929

1930
  @param text: the question to ask
1931

1932
  @param choices: list of tuples (input_char, return_value,
1933
      description); if not given, it will default to: [('y', True,
1934
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1935
      note that the '?' char is reserved for help
1936

1937
  @return: one of the return values from the choices list; if input is
1938
      not possible (i.e. not running with a tty), we return the last
1939
      entry from the list
1940

1941
  """
1942
  if choices is None:
1943
    choices = [("y", True, "Perform the operation"),
1944
               ("n", False, "Do not perform the operation")]
1945
  if not choices or not isinstance(choices, list):
1946
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1947
  for entry in choices:
1948
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1949
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1950

    
1951
  answer = choices[-1][1]
1952
  new_text = []
1953
  for line in text.splitlines():
1954
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1955
  text = "\n".join(new_text)
1956
  try:
1957
    f = file("/dev/tty", "a+")
1958
  except IOError:
1959
    return answer
1960
  try:
1961
    chars = [entry[0] for entry in choices]
1962
    chars[-1] = "[%s]" % chars[-1]
1963
    chars.append("?")
1964
    maps = dict([(entry[0], entry[1]) for entry in choices])
1965
    while True:
1966
      f.write(text)
1967
      f.write("\n")
1968
      f.write("/".join(chars))
1969
      f.write(": ")
1970
      line = f.readline(2).strip().lower()
1971
      if line in maps:
1972
        answer = maps[line]
1973
        break
1974
      elif line == "?":
1975
        for entry in choices:
1976
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1977
        f.write("\n")
1978
        continue
1979
  finally:
1980
    f.close()
1981
  return answer
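# Illustrative usage (sketch, not from the upstream sources): the default
# yes/no prompt and a custom three-way prompt.  Without a tty the value of
# the last entry is returned, so the safest choice should come last.
#
#   if not AskUser("Remove instance %s?" % instance_name):
#     return constants.EXIT_FAILURE
#
#   mode = AskUser("Failover or migrate the instance?",
#                  [("f", "failover", "Fail the instance over"),
#                   ("m", "migrate", "Live-migrate the instance"),
#                   ("c", None, "Cancel the operation")])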
1982

    
1983

    
1984
class JobSubmittedException(Exception):
1985
  """Job was submitted, client should exit.
1986

1987
  This exception has one argument, the ID of the job that was
1988
  submitted. The handler should print this ID.
1989

1990
  This is not an error, just a structured way to exit from clients.
1991

1992
  """
1993

    
1994

    
1995
def SendJob(ops, cl=None):
1996
  """Function to submit an opcode without waiting for the results.
1997

1998
  @type ops: list
1999
  @param ops: list of opcodes
2000
  @type cl: luxi.Client
2001
  @param cl: the luxi client to use for communicating with the master;
2002
             if None, a new client will be created
2003

2004
  """
2005
  if cl is None:
2006
    cl = GetClient()
2007

    
2008
  job_id = cl.SubmitJob(ops)
2009

    
2010
  return job_id
2011

    
2012

    
2013
def GenericPollJob(job_id, cbs, report_cbs):
2014
  """Generic job-polling function.
2015

2016
  @type job_id: number
2017
  @param job_id: Job ID
2018
  @type cbs: Instance of L{JobPollCbBase}
2019
  @param cbs: Data callbacks
2020
  @type report_cbs: Instance of L{JobPollReportCbBase}
2021
  @param report_cbs: Reporting callbacks
2022

2023
  """
2024
  prev_job_info = None
2025
  prev_logmsg_serial = None
2026

    
2027
  status = None
2028

    
2029
  while True:
2030
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2031
                                      prev_logmsg_serial)
2032
    if not result:
2033
      # job not found, go away!
2034
      raise errors.JobLost("Job with id %s lost" % job_id)
2035

    
2036
    if result == constants.JOB_NOTCHANGED:
2037
      report_cbs.ReportNotChanged(job_id, status)
2038

    
2039
      # Wait again
2040
      continue
2041

    
2042
    # Split result, a tuple of (field values, log entries)
2043
    (job_info, log_entries) = result
2044
    (status, ) = job_info
2045

    
2046
    if log_entries:
2047
      for log_entry in log_entries:
2048
        (serial, timestamp, log_type, message) = log_entry
2049
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2050
                                    log_type, message)
2051
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2052

    
2053
    # TODO: Handle canceled and archived jobs
2054
    elif status in (constants.JOB_STATUS_SUCCESS,
2055
                    constants.JOB_STATUS_ERROR,
2056
                    constants.JOB_STATUS_CANCELING,
2057
                    constants.JOB_STATUS_CANCELED):
2058
      break
2059

    
2060
    prev_job_info = job_info
2061

    
2062
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2063
  if not jobs:
2064
    raise errors.JobLost("Job with id %s lost" % job_id)
2065

    
2066
  status, opstatus, result = jobs[0]
2067

    
2068
  if status == constants.JOB_STATUS_SUCCESS:
2069
    return result
2070

    
2071
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2072
    raise errors.OpExecError("Job was canceled")
2073

    
2074
  has_ok = False
2075
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2076
    if status == constants.OP_STATUS_SUCCESS:
2077
      has_ok = True
2078
    elif status == constants.OP_STATUS_ERROR:
2079
      errors.MaybeRaise(msg)
2080

    
2081
      if has_ok:
2082
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2083
                                 (idx, msg))
2084

    
2085
      raise errors.OpExecError(str(msg))
2086

    
2087
  # default failure mode
2088
  raise errors.OpExecError(result)
2089

    
2090

    
2091
class JobPollCbBase:
2092
  """Base class for L{GenericPollJob} callbacks.
2093

2094
  """
2095
  def __init__(self):
2096
    """Initializes this class.
2097

2098
    """
2099

    
2100
  def WaitForJobChangeOnce(self, job_id, fields,
2101
                           prev_job_info, prev_log_serial):
2102
    """Waits for changes on a job.
2103

2104
    """
2105
    raise NotImplementedError()
2106

    
2107
  def QueryJobs(self, job_ids, fields):
2108
    """Returns the selected fields for the selected job IDs.
2109

2110
    @type job_ids: list of numbers
2111
    @param job_ids: Job IDs
2112
    @type fields: list of strings
2113
    @param fields: Fields
2114

2115
    """
2116
    raise NotImplementedError()
2117

    
2118

    
2119
class JobPollReportCbBase:
2120
  """Base class for L{GenericPollJob} reporting callbacks.
2121

2122
  """
2123
  def __init__(self):
2124
    """Initializes this class.
2125

2126
    """
2127

    
2128
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2129
    """Handles a log message.
2130

2131
    """
2132
    raise NotImplementedError()
2133

    
2134
  def ReportNotChanged(self, job_id, status):
2135
    """Called for if a job hasn't changed in a while.
2136

2137
    @type job_id: number
2138
    @param job_id: Job ID
2139
    @type status: string or None
2140
    @param status: Job status if available
2141

2142
    """
2143
    raise NotImplementedError()
2144

    
2145

    
2146
class _LuxiJobPollCb(JobPollCbBase):
2147
  def __init__(self, cl):
2148
    """Initializes this class.
2149

2150
    """
2151
    JobPollCbBase.__init__(self)
2152
    self.cl = cl
2153

    
2154
  def WaitForJobChangeOnce(self, job_id, fields,
2155
                           prev_job_info, prev_log_serial):
2156
    """Waits for changes on a job.
2157

2158
    """
2159
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2160
                                        prev_job_info, prev_log_serial)
2161

    
2162
  def QueryJobs(self, job_ids, fields):
2163
    """Returns the selected fields for the selected job IDs.
2164

2165
    """
2166
    return self.cl.QueryJobs(job_ids, fields)
2167

    
2168

    
2169
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2170
  def __init__(self, feedback_fn):
2171
    """Initializes this class.
2172

2173
    """
2174
    JobPollReportCbBase.__init__(self)
2175

    
2176
    self.feedback_fn = feedback_fn
2177

    
2178
    assert callable(feedback_fn)
2179

    
2180
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2181
    """Handles a log message.
2182

2183
    """
2184
    self.feedback_fn((timestamp, log_type, log_msg))
2185

    
2186
  def ReportNotChanged(self, job_id, status):
2187
    """Called if a job hasn't changed in a while.
2188

2189
    """
2190
    # Ignore
2191

    
2192

    
2193
class StdioJobPollReportCb(JobPollReportCbBase):
2194
  def __init__(self):
2195
    """Initializes this class.
2196

2197
    """
2198
    JobPollReportCbBase.__init__(self)
2199

    
2200
    self.notified_queued = False
2201
    self.notified_waitlock = False
2202

    
2203
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2204
    """Handles a log message.
2205

2206
    """
2207
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2208
             FormatLogMessage(log_type, log_msg))
2209

    
2210
  def ReportNotChanged(self, job_id, status):
2211
    """Called if a job hasn't changed in a while.
2212

2213
    """
2214
    if status is None:
2215
      return
2216

    
2217
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2218
      ToStderr("Job %s is waiting in queue", job_id)
2219
      self.notified_queued = True
2220

    
2221
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2222
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2223
      self.notified_waitlock = True
2224

    
2225

    
2226
def FormatLogMessage(log_type, log_msg):
2227
  """Formats a job message according to its type.
2228

2229
  """
2230
  if log_type != constants.ELOG_MESSAGE:
2231
    log_msg = str(log_msg)
2232

    
2233
  return utils.SafeEncode(log_msg)
2234

    
2235

    
2236
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2237
  """Function to poll for the result of a job.
2238

2239
  @type job_id: job identifier
2240
  @param job_id: the job to poll for results
2241
  @type cl: luxi.Client
2242
  @param cl: the luxi client to use for communicating with the master;
2243
             if None, a new client will be created
2244

2245
  """
2246
  if cl is None:
2247
    cl = GetClient()
2248

    
2249
  if reporter is None:
2250
    if feedback_fn:
2251
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2252
    else:
2253
      reporter = StdioJobPollReportCb()
2254
  elif feedback_fn:
2255
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2256

    
2257
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
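# Illustrative usage (sketch, not from the upstream sources): submit a job
# asynchronously and wait for its result, with progress reported on stdio
# via the default StdioJobPollReportCb; "op" stands for any opcodes.OpCode
# instance.
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)
#   first_opcode_result = results[0]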
2258

    
2259

    
2260
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2261
  """Legacy function to submit an opcode.
2262

2263
  This is just a simple wrapper over the construction of the processor
2264
  instance. It should be extended to better handle feedback and
2265
  interaction functions.
2266

2267
  """
2268
  if cl is None:
2269
    cl = GetClient()
2270

    
2271
  SetGenericOpcodeOpts([op], opts)
2272

    
2273
  job_id = SendJob([op], cl=cl)
2274
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2275
    ToStdout("%d" % job_id)
2276

    
2277
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2278
                       reporter=reporter)
2279

    
2280
  return op_results[0]
2281

    
2282

    
2283
def SubmitOpCodeToDrainedQueue(op):
2284
  """Forcefully insert a job in the queue, even if it is drained.
2285

2286
  """
2287
  cl = GetClient()
2288
  job_id = cl.SubmitJobToDrainedQueue([op])
2289
  op_results = PollJob(job_id, cl=cl)
2290
  return op_results[0]
2291

    
2292

    
2293
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2294
  """Wrapper around SubmitOpCode or SendJob.
2295

2296
  This function will decide, based on the 'opts' parameter, whether to
2297
  submit and wait for the result of the opcode (and return it), or
2298
  whether to just send the job and print its identifier. It is used in
2299
  order to simplify the implementation of the '--submit' option.
2300

2301
  It will also process the opcodes if we're sending them via SendJob
2302
  (otherwise SubmitOpCode does it).
2303

2304
  """
2305
  if opts and opts.submit_only:
2306
    job = [op]
2307
    SetGenericOpcodeOpts(job, opts)
2308
    job_id = SendJob(job, cl=cl)
2309
    if opts.print_jobid:
2310
      ToStdout("%d" % job_id)
2311
    raise JobSubmittedException(job_id)
2312
  else:
2313
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2314

    
2315

    
2316
def _InitReasonTrail(op, opts):
2317
  """Builds the first part of the reason trail
2318

2319
  Builds the initial part of the reason trail, adding the user provided reason
2320
  (if it exists) and the name of the command starting the operation.
2321

2322
  @param op: the opcode the reason trail will be added to
2323
  @param opts: the command line options selected by the user
2324

2325
  """
2326
  assert len(sys.argv) >= 2
2327
  trail = []
2328

    
2329
  if opts.reason:
2330
    trail.append((constants.OPCODE_REASON_SRC_USER,
2331
                  opts.reason,
2332
                  utils.EpochNano()))
2333

    
2334
  binary = os.path.basename(sys.argv[0])
2335
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2336
  command = sys.argv[1]
2337
  trail.append((source, command, utils.EpochNano()))
2338
  op.reason = trail
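# Illustrative result (sketch): a command line roughly like
# "gnt-instance stop --reason 'kernel upgrade' inst1" would leave a reason
# trail of the form
#
#   [(constants.OPCODE_REASON_SRC_USER, "kernel upgrade", <epoch ns>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "stop",
#     <epoch ns>)]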
2339

    
2340

    
2341
def SetGenericOpcodeOpts(opcode_list, options):
2342
  """Processor for generic options.
2343

2344
  This function updates the given opcodes based on generic command
2345
  line options (like debug, dry-run, etc.).
2346

2347
  @param opcode_list: list of opcodes
2348
  @param options: command line options or None
2349
  @return: None (in-place modification)
2350

2351
  """
2352
  if not options:
2353
    return
2354
  for op in opcode_list:
2355
    op.debug_level = options.debug
2356
    if hasattr(options, "dry_run"):
2357
      op.dry_run = options.dry_run
2358
    if getattr(options, "priority", None) is not None:
2359
      op.priority = options.priority
2360
    _InitReasonTrail(op, options)
2361

    
2362

    
2363
def GetClient(query=False):
2364
  """Connects to the a luxi socket and returns a client.
2365

2366
  @type query: boolean
2367
  @param query: this signifies that the client will only be
2368
      used for queries; if the build-time parameter
2369
      enable-split-queries is enabled, then the client will be
2370
      connected to the query socket instead of the masterd socket
2371

2372
  """
2373
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2374
  if override_socket:
2375
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2376
      address = pathutils.MASTER_SOCKET
2377
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2378
      address = pathutils.QUERY_SOCKET
2379
    else:
2380
      address = override_socket
2381
  elif query and constants.ENABLE_SPLIT_QUERY:
2382
    address = pathutils.QUERY_SOCKET
2383
  else:
2384
    address = None
2385
  # TODO: Cache object?
2386
  try:
2387
    client = luxi.Client(address=address)
2388
  except luxi.NoMasterError:
2389
    ss = ssconf.SimpleStore()
2390

    
2391
    # Try to read ssconf file
2392
    try:
2393
      ss.GetMasterNode()
2394
    except errors.ConfigurationError:
2395
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2396
                                 " not part of a cluster",
2397
                                 errors.ECODE_INVAL)
2398

    
2399
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2400
    if master != myself:
2401
      raise errors.OpPrereqError("This is not the master node, please connect"
2402
                                 " to node '%s' and rerun the command" %
2403
                                 master, errors.ECODE_INVAL)
2404
    raise
2405
  return client
2406

    
2407

    
2408
def FormatError(err):
2409
  """Return a formatted error message for a given error.
2410

2411
  This function takes an exception instance and returns a tuple
2412
  consisting of two values: first, the recommended exit code, and
2413
  second, a string describing the error message (not
2414
  newline-terminated).
2415

2416
  """
2417
  retcode = 1
2418
  obuf = StringIO()
2419
  msg = str(err)
2420
  if isinstance(err, errors.ConfigurationError):
2421
    txt = "Corrupt configuration file: %s" % msg
2422
    logging.error(txt)
2423
    obuf.write(txt + "\n")
2424
    obuf.write("Aborting.")
2425
    retcode = 2
2426
  elif isinstance(err, errors.HooksAbort):
2427
    obuf.write("Failure: hooks execution failed:\n")
2428
    for node, script, out in err.args[0]:
2429
      if out:
2430
        obuf.write("  node: %s, script: %s, output: %s\n" %
2431
                   (node, script, out))
2432
      else:
2433
        obuf.write("  node: %s, script: %s (no output)\n" %
2434
                   (node, script))
2435
  elif isinstance(err, errors.HooksFailure):
2436
    obuf.write("Failure: hooks general failure: %s" % msg)
2437
  elif isinstance(err, errors.ResolverError):
2438
    this_host = netutils.Hostname.GetSysName()
2439
    if err.args[0] == this_host:
2440
      msg = "Failure: can't resolve my own hostname ('%s')"
2441
    else:
2442
      msg = "Failure: can't resolve hostname '%s'"
2443
    obuf.write(msg % err.args[0])
2444
  elif isinstance(err, errors.OpPrereqError):
2445
    if len(err.args) == 2:
2446
      obuf.write("Failure: prerequisites not met for this"
2447
                 " operation:\nerror type: %s, error details:\n%s" %
2448
                 (err.args[1], err.args[0]))
2449
    else:
2450
      obuf.write("Failure: prerequisites not met for this"
2451
                 " operation:\n%s" % msg)
2452
  elif isinstance(err, errors.OpExecError):
2453
    obuf.write("Failure: command execution error:\n%s" % msg)
2454
  elif isinstance(err, errors.TagError):
2455
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2456
  elif isinstance(err, errors.JobQueueDrainError):
2457
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2458
               " accept new requests\n")
2459
  elif isinstance(err, errors.JobQueueFull):
2460
    obuf.write("Failure: the job queue is full and doesn't accept new"
2461
               " job submissions until old jobs are archived\n")
2462
  elif isinstance(err, errors.TypeEnforcementError):
2463
    obuf.write("Parameter Error: %s" % msg)
2464
  elif isinstance(err, errors.ParameterError):
2465
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2466
  elif isinstance(err, luxi.NoMasterError):
2467
    if err.args[0] == pathutils.MASTER_SOCKET:
2468
      daemon = "the master daemon"
2469
    elif err.args[0] == pathutils.QUERY_SOCKET:
2470
      daemon = "the config daemon"
2471
    else:
2472
      daemon = "socket '%s'" % str(err.args[0])
2473
    obuf.write("Cannot communicate with %s.\nIs the process running"
2474
               " and listening for connections?" % daemon)
2475
  elif isinstance(err, luxi.TimeoutError):
2476
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2477
               " been submitted and will continue to run even if the call"
2478
               " timed out. Useful commands in this situation are \"gnt-job"
2479
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2480
    obuf.write(msg)
2481
  elif isinstance(err, luxi.PermissionError):
2482
    obuf.write("It seems you don't have permissions to connect to the"
2483
               " master daemon.\nPlease retry as a different user.")
2484
  elif isinstance(err, luxi.ProtocolError):
2485
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2486
               "%s" % msg)
2487
  elif isinstance(err, errors.JobLost):
2488
    obuf.write("Error checking job status: %s" % msg)
2489
  elif isinstance(err, errors.QueryFilterParseError):
2490
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2491
    obuf.write("\n".join(err.GetDetails()))
2492
  elif isinstance(err, errors.GenericError):
2493
    obuf.write("Unhandled Ganeti error: %s" % msg)
2494
  elif isinstance(err, JobSubmittedException):
2495
    obuf.write("JobID: %s\n" % err.args[0])
2496
    retcode = 0
2497
  else:
2498
    obuf.write("Unhandled exception: %s" % msg)
2499
  return retcode, obuf.getvalue().rstrip("\n")
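# Illustrative usage (sketch): turning an exception into an exit code plus a
# printable message, much as GenericMain() does below.
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     retcode, msg = FormatError(err)
#     ToStderr(msg)
#     sys.exit(retcode)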
2500

    
2501

    
2502
def GenericMain(commands, override=None, aliases=None,
2503
                env_override=frozenset()):
2504
  """Generic main function for all the gnt-* commands.
2505

2506
  @param commands: a dictionary with a special structure, see the design doc
2507
                   for command line handling.
2508
  @param override: if not None, we expect a dictionary with keys that will
2509
                   override command line options; this can be used to pass
2510
                   options from the scripts to generic functions
2511
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2512
  @param env_override: list of environment names which are allowed to submit
2513
                       default args for commands
2514

2515
  """
2516
  # save the program name and the entire command line for later logging
2517
  if sys.argv:
2518
    binary = os.path.basename(sys.argv[0])
2519
    if not binary:
2520
      binary = sys.argv[0]
2521

    
2522
    if len(sys.argv) >= 2:
2523
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2524
    else:
2525
      logname = binary
2526

    
2527
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2528
  else:
2529
    binary = "<unknown program>"
2530
    cmdline = "<unknown>"
2531

    
2532
  if aliases is None:
2533
    aliases = {}
2534

    
2535
  try:
2536
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2537
                                       env_override)
2538
  except _ShowVersion:
2539
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2540
             constants.RELEASE_VERSION)
2541
    return constants.EXIT_SUCCESS
2542
  except _ShowUsage, err:
2543
    for line in _FormatUsage(binary, commands):
2544
      ToStdout(line)
2545

    
2546
    if err.exit_error:
2547
      return constants.EXIT_FAILURE
2548
    else:
2549
      return constants.EXIT_SUCCESS
2550
  except errors.ParameterError, err:
2551
    result, err_msg = FormatError(err)
2552
    ToStderr(err_msg)
2553
    return 1
2554

    
2555
  if func is None: # parse error
2556
    return 1
2557

    
2558
  if override is not None:
2559
    for key, val in override.iteritems():
2560
      setattr(options, key, val)
2561

    
2562
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2563
                     stderr_logging=True)
2564

    
2565
  logging.info("Command line: %s", cmdline)
2566

    
2567
  try:
2568
    result = func(options, args)
2569
  except (errors.GenericError, luxi.ProtocolError,
2570
          JobSubmittedException), err:
2571
    result, err_msg = FormatError(err)
2572
    logging.exception("Error during command processing")
2573
    ToStderr(err_msg)
2574
  except KeyboardInterrupt:
2575
    result = constants.EXIT_FAILURE
2576
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2577
             " might have been submitted and"
2578
             " will continue to run in the background.")
2579
  except IOError, err:
2580
    if err.errno == errno.EPIPE:
2581
      # our terminal went away, we'll exit
2582
      sys.exit(constants.EXIT_FAILURE)
2583
    else:
2584
      raise
2585

    
2586
  return result
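# Illustrative sketch of the "commands" dictionary expected by GenericMain;
# each value is a (function, arguments, options, usage, description) tuple
# as unpacked in _ParseArgs above.  ListInstances, ARGS_MANY_INSTANCES,
# NOHDR_OPT and SEP_OPT are names assumed purely for illustration.
#
#   commands = {
#     "list": (ListInstances, ARGS_MANY_INSTANCES,
#              [NOHDR_OPT, SEP_OPT], "[<instance>...]",
#              "Lists the instances and their status"),
#     }
#
#   sys.exit(GenericMain(commands))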
2587

    
2588

    
2589
def ParseNicOption(optvalue):
2590
  """Parses the value of the --net option(s).
2591

2592
  """
2593
  try:
2594
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2595
  except (TypeError, ValueError), err:
2596
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2597
                               errors.ECODE_INVAL)
2598

    
2599
  nics = [{}] * nic_max
2600
  for nidx, ndict in optvalue:
2601
    nidx = int(nidx)
2602

    
2603
    if not isinstance(ndict, dict):
2604
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2605
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2606

    
2607
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2608

    
2609
    nics[nidx] = ndict
2610

    
2611
  return nics
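# Illustrative example (sketch): two "--net" options such as
# "--net 0:link=br0 --net 1:mode=routed" reach this function as
#
#   [("0", {"link": "br0"}), ("1", {"mode": "routed"})]
#
# and are turned into a list indexed by NIC number:
#
#   [{"link": "br0"}, {"mode": "routed"}]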
2612

    
2613

    
2614
def GenericInstanceCreate(mode, opts, args):
2615
  """Add an instance to the cluster via either creation or import.
2616

2617
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2618
  @param opts: the command line options selected by the user
2619
  @type args: list
2620
  @param args: should contain only one element, the new instance name
2621
  @rtype: int
2622
  @return: the desired exit code
2623

2624
  """
2625
  instance = args[0]
2626

    
2627
  (pnode, snode) = SplitNodeOption(opts.node)
2628

    
2629
  hypervisor = None
2630
  hvparams = {}
2631
  if opts.hypervisor:
2632
    hypervisor, hvparams = opts.hypervisor
2633

    
2634
  if opts.nics:
2635
    nics = ParseNicOption(opts.nics)
2636
  elif opts.no_nics:
2637
    # no nics
2638
    nics = []
2639
  elif mode == constants.INSTANCE_CREATE:
2640
    # default of one nic, all auto
2641
    nics = [{}]
2642
  else:
2643
    # mode == import
2644
    nics = []
2645

    
2646
  if opts.disk_template == constants.DT_DISKLESS:
2647
    if opts.disks or opts.sd_size is not None:
2648
      raise errors.OpPrereqError("Diskless instance but disk"
2649
                                 " information passed", errors.ECODE_INVAL)
2650
    disks = []
2651
  else:
2652
    if (not opts.disks and not opts.sd_size
2653
        and mode == constants.INSTANCE_CREATE):
2654
      raise errors.OpPrereqError("No disk information specified",
2655
                                 errors.ECODE_INVAL)
2656
    if opts.disks and opts.sd_size is not None:
2657
      raise errors.OpPrereqError("Please use either the '--disk' or"
2658
                                 " '-s' option", errors.ECODE_INVAL)
2659
    if opts.sd_size is not None:
2660
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2661

    
2662
    if opts.disks:
2663
      try:
2664
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2665
      except ValueError, err:
2666
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2667
                                   errors.ECODE_INVAL)
2668
      disks = [{}] * disk_max
2669
    else:
2670
      disks = []
2671
    for didx, ddict in opts.disks:
2672
      didx = int(didx)
2673
      if not isinstance(ddict, dict):
2674
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2675
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2676
      elif constants.IDISK_SIZE in ddict:
2677
        if constants.IDISK_ADOPT in ddict:
2678
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2679
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2680
        try:
2681
          ddict[constants.IDISK_SIZE] = \
2682
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2683
        except ValueError, err:
2684
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2685
                                     (didx, err), errors.ECODE_INVAL)
2686
      elif constants.IDISK_ADOPT in ddict:
2687
        if constants.IDISK_SPINDLES in ddict:
2688
          raise errors.OpPrereqError("spindles is not a valid option when"
2689
                                     " adopting a disk", errors.ECODE_INVAL)
2690
        if mode == constants.INSTANCE_IMPORT:
2691
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2692
                                     " import", errors.ECODE_INVAL)
2693
        ddict[constants.IDISK_SIZE] = 0
2694
      else:
2695
        raise errors.OpPrereqError("Missing size or adoption source for"
2696
                                   " disk %d" % didx, errors.ECODE_INVAL)
2697
      disks[didx] = ddict
2698

    
2699
  if opts.tags is not None:
2700
    tags = opts.tags.split(",")
2701
  else:
2702
    tags = []
2703

    
2704
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2705
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2706

    
2707
  if mode == constants.INSTANCE_CREATE:
2708
    start = opts.start
2709
    os_type = opts.os
2710
    force_variant = opts.force_variant
2711
    src_node = None
2712
    src_path = None
2713
    no_install = opts.no_install
2714
    identify_defaults = False
2715
  elif mode == constants.INSTANCE_IMPORT:
2716
    start = False
2717
    os_type = None
2718
    force_variant = False
2719
    src_node = opts.src_node
2720
    src_path = opts.src_dir
2721
    no_install = None
2722
    identify_defaults = opts.identify_defaults
2723
  else:
2724
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2725

    
2726
  op = opcodes.OpInstanceCreate(instance_name=instance,
2727
                                disks=disks,
2728
                                disk_template=opts.disk_template,
2729
                                nics=nics,
2730
                                conflicts_check=opts.conflicts_check,
2731
                                pnode=pnode, snode=snode,
2732
                                ip_check=opts.ip_check,
2733
                                name_check=opts.name_check,
2734
                                wait_for_sync=opts.wait_for_sync,
2735
                                file_storage_dir=opts.file_storage_dir,
2736
                                file_driver=opts.file_driver,
2737
                                iallocator=opts.iallocator,
2738
                                hypervisor=hypervisor,
2739
                                hvparams=hvparams,
2740
                                beparams=opts.beparams,
2741
                                osparams=opts.osparams,
2742
                                mode=mode,
2743
                                start=start,
2744
                                os_type=os_type,
2745
                                force_variant=force_variant,
2746
                                src_node=src_node,
2747
                                src_path=src_path,
2748
                                tags=tags,
2749
                                no_install=no_install,
2750
                                identify_defaults=identify_defaults,
2751
                                ignore_ipolicy=opts.ignore_ipolicy)
2752

    
2753
  SubmitOrSend(op, opts)
2754
  return 0
2755

    
2756

    
2757
class _RunWhileClusterStoppedHelper:
2758
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2759

2760
  """
2761
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2762
    """Initializes this class.
2763

2764
    @type feedback_fn: callable
2765
    @param feedback_fn: Feedback function
2766
    @type cluster_name: string
2767
    @param cluster_name: Cluster name
2768
    @type master_node: string
2769
    @param master_node: Master node name
2770
    @type online_nodes: list
2771
    @param online_nodes: List of names of online nodes
2772

2773
    """
2774
    self.feedback_fn = feedback_fn
2775
    self.cluster_name = cluster_name
2776
    self.master_node = master_node
2777
    self.online_nodes = online_nodes
2778

    
2779
    self.ssh = ssh.SshRunner(self.cluster_name)
2780

    
2781
    self.nonmaster_nodes = [name for name in online_nodes
2782
                            if name != master_node]
2783

    
2784
    assert self.master_node not in self.nonmaster_nodes
2785

    
2786
  def _RunCmd(self, node_name, cmd):
2787
    """Runs a command on the local or a remote machine.
2788

2789
    @type node_name: string
2790
    @param node_name: Machine name
2791
    @type cmd: list
2792
    @param cmd: Command
2793

2794
    """
2795
    if node_name is None or node_name == self.master_node:
2796
      # No need to use SSH
2797
      result = utils.RunCmd(cmd)
2798
    else:
2799
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2800
                            utils.ShellQuoteArgs(cmd))
2801

    
2802
    if result.failed:
2803
      errmsg = ["Failed to run command %s" % result.cmd]
2804
      if node_name:
2805
        errmsg.append("on node %s" % node_name)
2806
      errmsg.append(": exitcode %s and error %s" %
2807
                    (result.exit_code, result.output))
2808
      raise errors.OpExecError(" ".join(errmsg))
2809

    
2810
  def Call(self, fn, *args):
2811
    """Call function while all daemons are stopped.
2812

2813
    @type fn: callable
2814
    @param fn: Function to be called
2815

2816
    """
2817
    # Pause watcher by acquiring an exclusive lock on watcher state file
2818
    self.feedback_fn("Blocking watcher")
2819
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2820
    try:
2821
      # TODO: Currently, this just blocks. There's no timeout.
2822
      # TODO: Should it be a shared lock?
2823
      watcher_block.Exclusive(blocking=True)
2824

    
2825
      # Stop master daemons, so that no new jobs can come in and all running
2826
      # ones are finished
2827
      self.feedback_fn("Stopping master daemons")
2828
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2829
      try:
2830
        # Stop daemons on all nodes
2831
        for node_name in self.online_nodes:
2832
          self.feedback_fn("Stopping daemons on %s" % node_name)
2833
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2834

    
2835
        # All daemons are shut down now
2836
        try:
2837
          return fn(self, *args)
2838
        except Exception, err:
2839
          _, errmsg = FormatError(err)
2840
          logging.exception("Caught exception")
2841
          self.feedback_fn(errmsg)
2842
          raise
2843
      finally:
2844
        # Start cluster again, master node last
2845
        for node_name in self.nonmaster_nodes + [self.master_node]:
2846
          self.feedback_fn("Starting daemons on %s" % node_name)
2847
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2848
    finally:
2849
      # Resume watcher
2850
      watcher_block.Close()
2851

    
2852

    
2853
def RunWhileClusterStopped(feedback_fn, fn, *args):
2854
  """Calls a function while all cluster daemons are stopped.
2855

2856
  @type feedback_fn: callable
2857
  @param feedback_fn: Feedback function
2858
  @type fn: callable
2859
  @param fn: Function to be called when daemons are stopped
2860

2861
  """
2862
  feedback_fn("Gathering cluster information")
2863

    
2864
  # This ensures we're running on the master daemon
2865
  cl = GetClient()
2866

    
2867
  (cluster_name, master_node) = \
2868
    cl.QueryConfigValues(["cluster_name", "master_node"])
2869

    
2870
  online_nodes = GetOnlineNodes([], cl=cl)
2871

    
2872
  # Don't keep a reference to the client. The master daemon will go away.
2873
  del cl
2874

    
2875
  assert master_node in online_nodes
2876

    
2877
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2878
                                       online_nodes).Call(fn, *args)
2879

    
2880

    
2881
def GenerateTable(headers, fields, separator, data,
2882
                  numfields=None, unitfields=None,
2883
                  units=None):
2884
  """Prints a table with headers and different fields.
2885

2886
  @type headers: dict
2887
  @param headers: dictionary mapping field names to headers for
2888
      the table
2889
  @type fields: list
2890
  @param fields: the field names corresponding to each row in
2891
      the data field
2892
  @param separator: the separator to be used; if this is None,
2893
      the default 'smart' algorithm is used which computes optimal
2894
      field width, otherwise just the separator is used between
2895
      each field
2896
  @type data: list
2897
  @param data: a list of lists, each sublist being one row to be output
2898
  @type numfields: list
2899
  @param numfields: a list with the fields that hold numeric
2900
      values and thus should be right-aligned
2901
  @type unitfields: list
2902
  @param unitfields: a list with the fields that hold numeric
2903
      values that should be formatted with the units field
2904
  @type units: string or None
2905
  @param units: the units we should use for formatting, or None for
2906
      automatic choice (human-readable for non-separator usage, otherwise
2907
      megabytes); this is a one-letter string
2908

2909
  """
2910
  if units is None:
2911
    if separator:
2912
      units = "m"
2913
    else:
2914
      units = "h"
2915

    
2916
  if numfields is None:
2917
    numfields = []
2918
  if unitfields is None:
2919
    unitfields = []
2920

    
2921
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2922
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2923

    
2924
  format_fields = []
2925
  for field in fields:
2926
    if headers and field not in headers:
2927
      # TODO: handle better unknown fields (either revert to old
2928
      # style of raising exception, or deal more intelligently with
2929
      # variable fields)
2930
      headers[field] = field
2931
    if separator is not None:
2932
      format_fields.append("%s")
2933
    elif numfields.Matches(field):
2934
      format_fields.append("%*s")
2935
    else:
2936
      format_fields.append("%-*s")
2937

    
2938
  if separator is None:
2939
    mlens = [0 for name in fields]
2940
    format_str = " ".join(format_fields)
2941
  else:
2942
    format_str = separator.replace("%", "%%").join(format_fields)
2943

    
2944
  for row in data:
2945
    if row is None:
2946
      continue
2947
    for idx, val in enumerate(row):
2948
      if unitfields.Matches(fields[idx]):
2949
        try:
2950
          val = int(val)
2951
        except (TypeError, ValueError):
2952
          pass
2953
        else:
2954
          val = row[idx] = utils.FormatUnit(val, units)
2955
      val = row[idx] = str(val)
2956
      if separator is None:
2957
        mlens[idx] = max(mlens[idx], len(val))
2958

    
2959
  result = []
2960
  if headers:
2961
    args = []
2962
    for idx, name in enumerate(fields):
2963
      hdr = headers[name]
2964
      if separator is None:
2965
        mlens[idx] = max(mlens[idx], len(hdr))
2966
        args.append(mlens[idx])
2967
      args.append(hdr)
2968
    result.append(format_str % tuple(args))
2969

    
2970
  if separator is None:
2971
    assert len(mlens) == len(fields)
2972

    
2973
    if fields and not numfields.Matches(fields[-1]):
2974
      mlens[-1] = 0
2975

    
2976
  for line in data:
2977
    args = []
2978
    if line is None:
2979
      line = ["-" for _ in fields]
2980
    for idx in range(len(fields)):
2981
      if separator is None:
2982
        args.append(mlens[idx])
2983
      args.append(line[idx])
2984
    result.append(format_str % tuple(args))
2985

    
2986
  return result
2987

    
2988

    
2989
def _FormatBool(value):
2990
  """Formats a boolean value as a string.
2991

2992
  """
2993
  if value:
2994
    return "Y"
2995
  return "N"
2996

    
2997

    
2998
#: Default formatting for query results; (callback, align right)
2999
_DEFAULT_FORMAT_QUERY = {
3000
  constants.QFT_TEXT: (str, False),
3001
  constants.QFT_BOOL: (_FormatBool, False),
3002
  constants.QFT_NUMBER: (str, True),
3003
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3004
  constants.QFT_OTHER: (str, False),
3005
  constants.QFT_UNKNOWN: (str, False),
3006
  }
3007

    
3008

    
3009
def _GetColumnFormatter(fdef, override, unit):
3010
  """Returns formatting function for a field.
3011

3012
  @type fdef: L{objects.QueryFieldDefinition}
3013
  @type override: dict
3014
  @param override: Dictionary for overriding field formatting functions,
3015
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3016
  @type unit: string
3017
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3018
  @rtype: tuple; (callable, bool)
3019
  @return: Returns the function to format a value (takes one parameter) and a
3020
    boolean for aligning the value on the right-hand side
3021

3022
  """
3023
  fmt = override.get(fdef.name, None)
3024
  if fmt is not None:
3025
    return fmt
3026

    
3027
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3028

    
3029
  if fdef.kind == constants.QFT_UNIT:
3030
    # Can't keep this information in the static dictionary
3031
    return (lambda value: utils.FormatUnit(value, unit), True)
3032

    
3033
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3034
  if fmt is not None:
3035
    return fmt
3036

    
3037
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3038

    
3039

    
3040
class _QueryColumnFormatter:
3041
  """Callable class for formatting fields of a query.
3042

3043
  """
3044
  def __init__(self, fn, status_fn, verbose):
3045
    """Initializes this class.
3046

3047
    @type fn: callable
3048
    @param fn: Formatting function
3049
    @type status_fn: callable
3050
    @param status_fn: Function to report fields' status
3051
    @type verbose: boolean
3052
    @param verbose: whether to use verbose field descriptions or not
3053

3054
    """
3055
    self._fn = fn
3056
    self._status_fn = status_fn
3057
    self._verbose = verbose
3058

    
3059
  def __call__(self, data):
3060
    """Returns a field's string representation.
3061

3062
    """
3063
    (status, value) = data
3064

    
3065
    # Report status
3066
    self._status_fn(status)
3067

    
3068
    if status == constants.RS_NORMAL:
3069
      return self._fn(value)
3070

    
3071
    assert value is None, \
3072
           "Found value %r for abnormal status %s" % (value, status)
3073

    
3074
    return FormatResultError(status, self._verbose)
3075

    
3076

    
3077
def FormatResultError(status, verbose):
3078
  """Formats result status other than L{constants.RS_NORMAL}.
3079

3080
  @param status: The result status
3081
  @type verbose: boolean
3082
  @param verbose: Whether to return the verbose text
3083
  @return: Text of result status
3084

3085
  """
3086
  assert status != constants.RS_NORMAL, \
3087
         "FormatResultError called with status equal to constants.RS_NORMAL"
3088
  try:
3089
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3090
  except KeyError:
3091
    raise NotImplementedError("Unknown status %s" % status)
3092
  else:
3093
    if verbose:
3094
      return verbose_text
3095
    return normal_text
3096

    
3097

    
3098
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3099
                      header=False, verbose=False):
3100
  """Formats data in L{objects.QueryResponse}.
3101

3102
  @type result: L{objects.QueryResponse}
3103
  @param result: result of query operation
3104
  @type unit: string
3105
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3106
    see L{utils.text.FormatUnit}
3107
  @type format_override: dict
3108
  @param format_override: Dictionary for overriding field formatting functions,
3109
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3110
  @type separator: string or None
3111
  @param separator: String used to separate fields
3112
  @type header: bool
3113
  @param header: Whether to output header row
3114
  @type verbose: boolean
3115
  @param verbose: whether to use verbose field descriptions or not
3116

3117
  """
3118
  if unit is None:
3119
    if separator:
3120
      unit = "m"
3121
    else:
3122
      unit = "h"
3123

    
3124
  if format_override is None:
3125
    format_override = {}
3126

    
3127
  stats = dict.fromkeys(constants.RS_ALL, 0)
3128

    
3129
  def _RecordStatus(status):
3130
    if status in stats:
3131
      stats[status] += 1
3132

    
3133
  columns = []
3134
  for fdef in result.fields:
3135
    assert fdef.title and fdef.name
3136
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3137
    columns.append(TableColumn(fdef.title,
3138
                               _QueryColumnFormatter(fn, _RecordStatus,
3139
                                                     verbose),
3140
                               align_right))
3141

    
3142
  table = FormatTable(result.data, columns, header, separator)
3143

    
3144
  # Collect statistics
3145
  assert len(stats) == len(constants.RS_ALL)
3146
  assert compat.all(count >= 0 for count in stats.values())
3147

    
3148
  # Determine overall status. If there was no data, unknown fields must be
3149
  # detected via the field definitions.
3150
  if (stats[constants.RS_UNKNOWN] or
3151
      (not result.data and _GetUnknownFields(result.fields))):
3152
    status = QR_UNKNOWN
3153
  elif compat.any(count > 0 for key, count in stats.items()
3154
                  if key != constants.RS_NORMAL):
3155
    status = QR_INCOMPLETE
3156
  else:
3157
    status = QR_NORMAL
3158

    
3159
  return (status, table)
3160

    
3161

    
3162
def _GetUnknownFields(fdefs):
3163
  """Returns list of unknown fields included in C{fdefs}.
3164

3165
  @type fdefs: list of L{objects.QueryFieldDefinition}
3166

3167
  """
3168
  return [fdef for fdef in fdefs
3169
          if fdef.kind == constants.QFT_UNKNOWN]
3170

    
3171

    
3172
def _WarnUnknownFields(fdefs):
3173
  """Prints a warning to stderr if a query included unknown fields.
3174

3175
  @type fdefs: list of L{objects.QueryFieldDefinition}
3176

3177
  """
3178
  unknown = _GetUnknownFields(fdefs)
3179
  if unknown:
3180
    ToStderr("Warning: Queried for unknown fields %s",
3181
             utils.CommaJoin(fdef.name for fdef in unknown))
3182
    return True
3183

    
3184
  return False
3185

    
3186

    
3187
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3188
                format_override=None, verbose=False, force_filter=False,
3189
                namefield=None, qfilter=None, isnumeric=False):
3190
  """Generic implementation for listing all items of a resource.
3191

3192
  @param resource: One of L{constants.QR_VIA_LUXI}
3193
  @type fields: list of strings
3194
  @param fields: List of fields to query for
3195
  @type names: list of strings
3196
  @param names: Names of items to query for
3197
  @type unit: string or None
3198
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3199
    None for automatic choice (human-readable for non-separator usage,
3200
    otherwise megabytes); this is a one-letter string
3201
  @type separator: string or None
3202
  @param separator: String used to separate fields
3203
  @type header: bool
3204
  @param header: Whether to show header row
3205
  @type force_filter: bool
3206
  @param force_filter: Whether to always treat names as filter
3207
  @type format_override: dict
3208
  @param format_override: Dictionary for overriding field formatting functions,
3209
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3210
  @type verbose: boolean
3211
  @param verbose: whether to use verbose field descriptions or not
3212
  @type namefield: string
3213
  @param namefield: Name of field to use for simple filters (see
3214
    L{qlang.MakeFilter} for details)
3215
  @type qfilter: list or None
3216
  @param qfilter: Query filter (in addition to names)
3217
  @param isnumeric: bool
3218
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3219
    any simple filters built by namefield should use integer values to
3220
    reflect that
3221

3222
  """
3223
  if not names:
3224
    names = None
3225

    
3226
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3227
                                isnumeric=isnumeric)
3228

    
3229
  if qfilter is None:
3230
    qfilter = namefilter
3231
  elif namefilter is not None:
3232
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3233

    
3234
  if cl is None:
3235
    cl = GetClient()
3236

    
3237
  response = cl.Query(resource, fields, qfilter)
3238

    
3239
  found_unknown = _WarnUnknownFields(response.fields)
3240

    
3241
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3242
                                     header=header,
3243
                                     format_override=format_override,
3244
                                     verbose=verbose)
3245

    
3246
  for line in data:
3247
    ToStdout(line)
3248

    
3249
  assert ((found_unknown and status == QR_UNKNOWN) or
3250
          (not found_unknown and status != QR_UNKNOWN))
3251

    
3252
  if status == QR_UNKNOWN:
3253
    return constants.EXIT_UNKNOWN_FIELD
3254

    
3255
  # TODO: Should the list command fail if not all data could be collected?
3256
  return constants.EXIT_SUCCESS
3257

    
3258

    
3259
def _FieldDescValues(fdef):
3260
  """Helper function for L{GenericListFields} to get query field description.
3261

3262
  @type fdef: L{objects.QueryFieldDefinition}
3263
  @rtype: list
3264

3265
  """
3266
  return [
3267
    fdef.name,
3268
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3269
    fdef.title,
3270
    fdef.doc,
3271
    ]
3272

    
3273

    
3274
def GenericListFields(resource, fields, separator, header, cl=None):
3275
  """Generic implementation for listing fields for a resource.
3276

3277
  @param resource: One of L{constants.QR_VIA_LUXI}
3278
  @type fields: list of strings
3279
  @param fields: List of fields to query for
3280
  @type separator: string or None
3281
  @param separator: String used to separate fields
3282
  @type header: bool
3283
  @param header: Whether to show header row
3284

3285
  """
3286
  if cl is None:
3287
    cl = GetClient()
3288

    
3289
  if not fields:
3290
    fields = None
3291

    
3292
  response = cl.QueryFields(resource, fields)
3293

    
3294
  found_unknown = _WarnUnknownFields(response.fields)
3295

    
3296
  columns = [
3297
    TableColumn("Name", str, False),
3298
    TableColumn("Type", str, False),
3299
    TableColumn("Title", str, False),
3300
    TableColumn("Description", str, False),
3301
    ]
3302

    
3303
  rows = map(_FieldDescValues, response.fields)
3304

    
3305
  for line in FormatTable(rows, columns, header, separator):
3306
    ToStdout(line)
3307

    
3308
  if found_unknown:
3309
    return constants.EXIT_UNKNOWN_FIELD
3310

    
3311
  return constants.EXIT_SUCCESS
3312

    
3313

    
3314
class TableColumn:
3315
  """Describes a column for L{FormatTable}.
3316

3317
  """
3318
  def __init__(self, title, fn, align_right):
3319
    """Initializes this class.
3320

3321
    @type title: string
3322
    @param title: Column title
3323
    @type fn: callable
3324
    @param fn: Formatting function
3325
    @type align_right: bool
3326
    @param align_right: Whether to align values on the right-hand side
3327

3328
    """
3329
    self.title = title
3330
    self.format = fn
3331
    self.align_right = align_right
3332

    
3333

    
3334
def _GetColFormatString(width, align_right):
3335
  """Returns the format string for a field.
3336

3337
  """
3338
  if align_right:
3339
    sign = ""
3340
  else:
3341
    sign = "-"
3342

    
3343
  return "%%%s%ss" % (sign, width)
3344

    
3345

    
3346
def FormatTable(rows, columns, header, separator):
3347
  """Formats data as a table.
3348

3349
  @type rows: list of lists
3350
  @param rows: Row data, one list per row
3351
  @type columns: list of L{TableColumn}
3352
  @param columns: Column descriptions
3353
  @type header: bool
3354
  @param header: Whether to show header row
3355
  @type separator: string or None
3356
  @param separator: String used to separate columns
3357

3358
  """
3359
  if header:
3360
    data = [[col.title for col in columns]]
3361
    colwidth = [len(col.title) for col in columns]
3362
  else:
3363
    data = []
3364
    colwidth = [0 for _ in columns]
3365

    
3366
  # Format row data
3367
  for row in rows:
3368
    assert len(row) == len(columns)
3369

    
3370
    formatted = [col.format(value) for value, col in zip(row, columns)]
3371

    
3372
    if separator is None:
3373
      # Update column widths
3374
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3375
        # Modifying a list's items while iterating is fine
3376
        colwidth[idx] = max(oldwidth, len(value))
3377

    
3378
    data.append(formatted)
3379

    
3380
  if separator is not None:
3381
    # Return early if a separator is used
3382
    return [separator.join(row) for row in data]
3383

    
3384
  if columns and not columns[-1].align_right:
3385
    # Avoid unnecessary spaces at end of line
3386
    colwidth[-1] = 0
3387

    
3388
  # Build format string
3389
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3390
                  for col, width in zip(columns, colwidth)])
3391

    
3392
  return [fmt % tuple(row) for row in data]
3393

    
3394

    
3395
def FormatTimestamp(ts):
3396
  """Formats a given timestamp.
3397

3398
  @type ts: timestamp
3399
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3400

3401
  @rtype: string
3402
  @return: a string with the formatted timestamp
3403

3404
  """
3405
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3406
    return "?"
3407

    
3408
  (sec, usecs) = ts
3409
  return utils.FormatTime(sec, usecs=usecs)
3410

    
3411

    
3412
def ParseTimespec(value):
3413
  """Parse a time specification.
3414

3415
  The following suffixed will be recognized:
3416

3417
    - s: seconds
3418
    - m: minutes
3419
    - h: hours
3420
    - d: day
3421
    - w: weeks
3422

3423
  Without any suffix, the value will be taken to be in seconds.
3424

3425
  """
3426
  value = str(value)
3427
  if not value:
3428
    raise errors.OpPrereqError("Empty time specification passed",
3429
                               errors.ECODE_INVAL)
3430
  suffix_map = {
3431
    "s": 1,
3432
    "m": 60,
3433
    "h": 3600,
3434
    "d": 86400,
3435
    "w": 604800,
3436
    }
3437
  if value[-1] not in suffix_map:
3438
    try:
3439
      value = int(value)
3440
    except (TypeError, ValueError):
3441
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3442
                                 errors.ECODE_INVAL)
3443
  else:
3444
    multiplier = suffix_map[value[-1]]
3445
    value = value[:-1]
3446
    if not value: # no data left after stripping the suffix
3447
      raise errors.OpPrereqError("Invalid time specification (only"
3448
                                 " suffix passed)", errors.ECODE_INVAL)
3449
    try:
3450
      value = int(value) * multiplier
3451
    except (TypeError, ValueError):
3452
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3453
                                 errors.ECODE_INVAL)
3454
  return value
3455

    
3456

    
3457
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3458
                   filter_master=False, nodegroup=None):
3459
  """Returns the names of online nodes.
3460

3461
  This function will also log a warning on stderr with the names of
3462
  the online nodes.
3463

3464
  @param nodes: if not empty, use only this subset of nodes (minus the
3465
      offline ones)
3466
  @param cl: if not None, luxi client to use
3467
  @type nowarn: boolean
3468
  @param nowarn: by default, this function will output a note with the
3469
      offline nodes that are skipped; if this parameter is True the
3470
      note is not displayed
3471
  @type secondary_ips: boolean
3472
  @param secondary_ips: if True, return the secondary IPs instead of the
3473
      names, useful for doing network traffic over the replication interface
3474
      (if any)
3475
  @type filter_master: boolean
3476
  @param filter_master: if True, do not return the master node in the list
3477
      (useful in coordination with secondary_ips where we cannot check our
3478
      node name against the list)
3479
  @type nodegroup: string
3480
  @param nodegroup: If set, only return nodes in this node group
3481

3482
  """
3483
  if cl is None:
3484
    cl = GetClient()
3485

    
3486
  qfilter = []
3487

    
3488
  if nodes:
3489
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3490

    
3491
  if nodegroup is not None:
3492
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3493
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3494

    
3495
  if filter_master:
3496
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3497

    
3498
  if qfilter:
3499
    if len(qfilter) > 1:
3500
      final_filter = [qlang.OP_AND] + qfilter
3501
    else:
3502
      assert len(qfilter) == 1
3503
      final_filter = qfilter[0]
3504
  else:
3505
    final_filter = None
3506

    
3507
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3508

    
3509
  def _IsOffline(row):
3510
    (_, (_, offline), _) = row
3511
    return offline
3512

    
3513
  def _GetName(row):
3514
    ((_, name), _, _) = row
3515
    return name
3516

    
3517
  def _GetSip(row):
3518
    (_, _, (_, sip)) = row
3519
    return sip
3520

    
3521
  (offline, online) = compat.partition(result.data, _IsOffline)
3522

    
3523
  if offline and not nowarn:
3524
    ToStderr("Note: skipping offline node(s): %s" %
3525
             utils.CommaJoin(map(_GetName, offline)))
3526

    
3527
  if secondary_ips:
3528
    fn = _GetSip
3529
  else:
3530
    fn = _GetName
3531

    
3532
  return map(fn, online)
3533

    
3534

    
3535
def _ToStream(stream, txt, *args):
3536
  """Write a message to a stream, bypassing the logging system
3537

3538
  @type stream: file object
3539
  @param stream: the file to which we should write
3540
  @type txt: str
3541
  @param txt: the message
3542

3543
  """
3544
  try:
3545
    if args:
3546
      args = tuple(args)
3547
      stream.write(txt % args)
3548
    else:
3549
      stream.write(txt)
3550
    stream.write("\n")
3551
    stream.flush()
3552
  except IOError, err:
3553
    if err.errno == errno.EPIPE:
3554
      # our terminal went away, we'll exit
3555
      sys.exit(constants.EXIT_FAILURE)
3556
    else:
3557
      raise
3558

    
3559

    
3560
def ToStdout(txt, *args):
3561
  """Write a message to stdout only, bypassing the logging system
3562

3563
  This is just a wrapper over _ToStream.
3564

3565
  @type txt: str
3566
  @param txt: the message
3567

3568
  """
3569
  _ToStream(sys.stdout, txt, *args)
3570

    
3571

    
3572
def ToStderr(txt, *args):
3573
  """Write a message to stderr only, bypassing the logging system
3574

3575
  This is just a wrapper over _ToStream.
3576

3577
  @type txt: str
3578
  @param txt: the message
3579

3580
  """
3581
  _ToStream(sys.stderr, txt, *args)
3582

    
3583

    
3584
class JobExecutor(object):
3585
  """Class which manages the submission and execution of multiple jobs.
3586

3587
  Note that instances of this class should not be reused between
3588
  GetResults() calls.
3589

3590
  """
3591
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3592
    self.queue = []
3593
    if cl is None:
3594
      cl = GetClient()
3595
    self.cl = cl
3596
    self.verbose = verbose
3597
    self.jobs = []
3598
    self.opts = opts
3599
    self.feedback_fn = feedback_fn
3600
    self._counter = itertools.count()
3601

    
3602
  @staticmethod
3603
  def _IfName(name, fmt):
3604
    """Helper function for formatting name.
3605

3606
    """
3607
    if name:
3608
      return fmt % name
3609

    
3610
    return ""
3611

    
3612
  def QueueJob(self, name, *ops):
3613
    """Record a job for later submit.
3614

3615
    @type name: string
3616
    @param name: a description of the job, will be used in WaitJobSet
3617

3618
    """
3619
    SetGenericOpcodeOpts(ops, self.opts)
3620
    self.queue.append((self._counter.next(), name, ops))
3621

    
3622
  def AddJobId(self, name, status, job_id):
3623
    """Adds a job ID to the internal queue.
3624

3625
    """
3626
    self.jobs.append((self._counter.next(), status, job_id, name))
3627

    
3628
  def SubmitPending(self, each=False):
3629
    """Submit all pending jobs.
3630

3631
    """
3632
    if each:
3633
      results = []
3634
      for (_, _, ops) in self.queue:
3635
        # SubmitJob will remove the success status, but raise an exception if
3636
        # the submission fails, so we'll notice that anyway.
3637
        results.append([True, self.cl.SubmitJob(ops)[0]])
3638
    else:
3639
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3640
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3641
      self.jobs.append((idx, status, data, name))
3642

    
3643
  def _ChooseJob(self):
3644
    """Choose a non-waiting/queued job to poll next.
3645

3646
    """
3647
    assert self.jobs, "_ChooseJob called with empty job list"
3648

    
3649
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3650
                               ["status"])
3651
    assert result
3652

    
3653
    for job_data, status in zip(self.jobs, result):
3654
      if (isinstance(status, list) and status and
3655
          status[0] in (constants.JOB_STATUS_QUEUED,
3656
                        constants.JOB_STATUS_WAITING,
3657
                        constants.JOB_STATUS_CANCELING)):
3658
        # job is still present and waiting
3659
        continue
3660
      # good candidate found (either running job or lost job)
3661
      self.jobs.remove(job_data)
3662
      return job_data
3663

    
3664
    # no job found
3665
    return self.jobs.pop(0)
3666

    
3667
  def GetResults(self):
3668
    """Wait for and return the results of all jobs.
3669

3670
    @rtype: list
3671
    @return: list of tuples (success, job results), in the same order
3672
        as the submitted jobs; if a job has failed, instead of the result
3673
        there will be the error message
3674

3675
    """
3676
    if not self.jobs:
3677
      self.SubmitPending()
3678
    results = []
3679
    if self.verbose:
3680
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3681
      if ok_jobs:
3682
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3683

    
3684
    # first, remove any non-submitted jobs
3685
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3686
    for idx, _, jid, name in failures:
3687
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3688
      results.append((idx, False, jid))
3689

    
3690
    while self.jobs:
3691
      (idx, _, jid, name) = self._ChooseJob()
3692
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3693
      try:
3694
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3695
        success = True
3696
      except errors.JobLost, err:
3697
        _, job_result = FormatError(err)
3698
        ToStderr("Job %s%s has been archived, cannot check its result",
3699
                 jid, self._IfName(name, " for %s"))
3700
        success = False
3701
      except (errors.GenericError, luxi.ProtocolError), err:
3702
        _, job_result = FormatError(err)
3703
        success = False
3704
        # the error message will always be shown, verbose or not
3705
        ToStderr("Job %s%s has failed: %s",
3706
                 jid, self._IfName(name, " for %s"), job_result)
3707

    
3708
      results.append((idx, success, job_result))
3709

    
3710
    # sort based on the index, then drop it
3711
    results.sort()
3712
    results = [i[1:] for i in results]
3713

    
3714
    return results
3715

    
3716
  def WaitOrShow(self, wait):
3717
    """Wait for job results or only print the job IDs.
3718

3719
    @type wait: boolean
3720
    @param wait: whether to wait or not
3721

3722
    """
3723
    if wait:
3724
      return self.GetResults()
3725
    else:
3726
      if not self.jobs:
3727
        self.SubmitPending()
3728
      for _, status, result, name in self.jobs:
3729
        if status:
3730
          ToStdout("%s: %s", result, name)
3731
        else:
3732
          ToStderr("Failure for %s: %s", name, result)
3733
      return [row[1:3] for row in self.jobs]
3734

    
3735

    
3736
def FormatParamsDictInfo(param_dict, actual):
3737
  """Formats a parameter dictionary.
3738

3739
  @type param_dict: dict
3740
  @param param_dict: the own parameters
3741
  @type actual: dict
3742
  @param actual: the current parameter set (including defaults)
3743
  @rtype: dict
3744
  @return: dictionary where the value of each parameter is either a fully
3745
      formatted string or a dictionary containing formatted strings
3746

3747
  """
3748
  ret = {}
3749
  for (key, data) in actual.items():
3750
    if isinstance(data, dict) and data:
3751
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3752
    else:
3753
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3754
  return ret
3755

    
3756

    
3757
def _FormatListInfoDefault(data, def_data):
3758
  if data is not None:
3759
    ret = utils.CommaJoin(data)
3760
  else:
3761
    ret = "default (%s)" % utils.CommaJoin(def_data)
3762
  return ret
3763

    
3764

    
3765
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3766
  """Formats an instance policy.
3767

3768
  @type custom_ipolicy: dict
3769
  @param custom_ipolicy: own policy
3770
  @type eff_ipolicy: dict
3771
  @param eff_ipolicy: effective policy (including defaults); ignored for
3772
      cluster
3773
  @type iscluster: bool
3774
  @param iscluster: the policy is at cluster level
3775
  @rtype: list of pairs
3776
  @return: formatted data, suitable for L{PrintGenericInfo}
3777

3778
  """
3779
  if iscluster:
3780
    eff_ipolicy = custom_ipolicy
3781

    
3782
  minmax_out = []
3783
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3784
  if custom_minmax:
3785
    for (k, minmax) in enumerate(custom_minmax):
3786
      minmax_out.append([
3787
        ("%s/%s" % (key, k),
3788
         FormatParamsDictInfo(minmax[key], minmax[key]))
3789
        for key in constants.ISPECS_MINMAX_KEYS
3790
        ])
3791
  else:
3792
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3793
      minmax_out.append([
3794
        ("%s/%s" % (key, k),
3795
         FormatParamsDictInfo({}, minmax[key]))
3796
        for key in constants.ISPECS_MINMAX_KEYS
3797
        ])
3798
  ret = [("bounds specs", minmax_out)]
3799

    
3800
  if iscluster:
3801
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3802
    ret.append(
3803
      (constants.ISPECS_STD,
3804
       FormatParamsDictInfo(stdspecs, stdspecs))
3805
      )
3806

    
3807
  ret.append(
3808
    ("allowed disk templates",
3809
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3810
                            eff_ipolicy[constants.IPOLICY_DTS]))
3811
    )
3812
  ret.extend([
3813
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3814
    for key in constants.IPOLICY_PARAMETERS
3815
    ])
3816
  return ret
3817

    
3818

    
3819
def _PrintSpecsParameters(buf, specs):
3820
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3821
  buf.write(",".join(values))
3822

    
3823

    
3824
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3825
  """Print the command option used to generate the given instance policy.
3826

3827
  Currently only the parts dealing with specs are supported.
3828

3829
  @type buf: StringIO
3830
  @param buf: stream to write into
3831
  @type ipolicy: dict
3832
  @param ipolicy: instance policy
3833
  @type isgroup: bool
3834
  @param isgroup: whether the policy is at group level
3835

3836
  """
3837
  if not isgroup:
3838
    stdspecs = ipolicy.get("std")
3839
    if stdspecs:
3840
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3841
      _PrintSpecsParameters(buf, stdspecs)
3842
  minmaxes = ipolicy.get("minmax", [])
3843
  first = True
3844
  for minmax in minmaxes:
3845
    minspecs = minmax.get("min")
3846
    maxspecs = minmax.get("max")
3847
    if minspecs and maxspecs:
3848
      if first:
3849
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3850
        first = False
3851
      else:
3852
        buf.write("//")
3853
      buf.write("min:")
3854
      _PrintSpecsParameters(buf, minspecs)
3855
      buf.write("/max:")
3856
      _PrintSpecsParameters(buf, maxspecs)
3857

    
3858

    
3859
def ConfirmOperation(names, list_type, text, extra=""):
3860
  """Ask the user to confirm an operation on a list of list_type.
3861

3862
  This function is used to request confirmation for doing an operation
3863
  on a given list of list_type.
3864

3865
  @type names: list
3866
  @param names: the list of names that we display when
3867
      we ask for confirmation
3868
  @type list_type: str
3869
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3870
  @type text: str
3871
  @param text: the operation that the user should confirm
3872
  @rtype: boolean
3873
  @return: True or False depending on user's confirmation.
3874

3875
  """
3876
  count = len(names)
3877
  msg = ("The %s will operate on %d %s.\n%s"
3878
         "Do you want to continue?" % (text, count, list_type, extra))
3879
  affected = (("\nAffected %s:\n" % list_type) +
3880
              "\n".join(["  %s" % name for name in names]))
3881

    
3882
  choices = [("y", True, "Yes, execute the %s" % text),
3883
             ("n", False, "No, abort the %s" % text)]
3884

    
3885
  if count > 20:
3886
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3887
    question = msg
3888
  else:
3889
    question = msg + affected
3890

    
3891
  choice = AskUser(question, choices)
3892
  if choice == "v":
3893
    choices.pop(1)
3894
    choice = AskUser(msg + affected, choices)
3895
  return choice
3896

    
3897

    
3898
def _MaybeParseUnit(elements):
3899
  """Parses and returns an array of potential values with units.
3900

3901
  """
3902
  parsed = {}
3903
  for k, v in elements.items():
3904
    if v == constants.VALUE_DEFAULT:
3905
      parsed[k] = v
3906
    else:
3907
      parsed[k] = utils.ParseUnit(v)
3908
  return parsed
3909

    
3910

    
3911
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3912
                             ispecs_disk_count, ispecs_disk_size,
3913
                             ispecs_nic_count, group_ipolicy, fill_all):
3914
  try:
3915
    if ispecs_mem_size:
3916
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3917
    if ispecs_disk_size:
3918
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3919
  except (TypeError, ValueError, errors.UnitParseError), err:
3920
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3921
                               " in policy: %s" %
3922
                               (ispecs_disk_size, ispecs_mem_size, err),
3923
                               errors.ECODE_INVAL)
3924

    
3925
  # prepare ipolicy dict
3926
  ispecs_transposed = {
3927
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3928
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3929
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3930
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3931
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3932
    }
3933

    
3934
  # first, check that the values given are correct
3935
  if group_ipolicy:
3936
    forced_type = TISPECS_GROUP_TYPES
3937
  else:
3938
    forced_type = TISPECS_CLUSTER_TYPES
3939
  for specs in ispecs_transposed.values():
3940
    assert type(specs) is dict
3941
    utils.ForceDictType(specs, forced_type)
3942

    
3943
  # then transpose
3944
  ispecs = {
3945
    constants.ISPECS_MIN: {},
3946
    constants.ISPECS_MAX: {},
3947
    constants.ISPECS_STD: {},
3948
    }
3949
  for (name, specs) in ispecs_transposed.iteritems():
3950
    assert name in constants.ISPECS_PARAMETERS
3951
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3952
      assert key in ispecs
3953
      ispecs[key][name] = val
3954
  minmax_out = {}
3955
  for key in constants.ISPECS_MINMAX_KEYS:
3956
    if fill_all:
3957
      minmax_out[key] = \
3958
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3959
    else:
3960
      minmax_out[key] = ispecs[key]
3961
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3962
  if fill_all:
3963
    ipolicy[constants.ISPECS_STD] = \
3964
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3965
                         ispecs[constants.ISPECS_STD])
3966
  else:
3967
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3968

    
3969

    
3970
def _ParseSpecUnit(spec, keyname):
3971
  ret = spec.copy()
3972
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3973
    if k in ret:
3974
      try:
3975
        ret[k] = utils.ParseUnit(ret[k])
3976
      except (TypeError, ValueError, errors.UnitParseError), err:
3977
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3978
                                    " specs: %s" % (k, ret[k], keyname, err)),
3979
                                   errors.ECODE_INVAL)
3980
  return ret
3981

    
3982

    
3983
def _ParseISpec(spec, keyname, required):
3984
  ret = _ParseSpecUnit(spec, keyname)
3985
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3986
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3987
  if required and missing:
3988
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3989
                               (keyname, utils.CommaJoin(missing)),
3990
                               errors.ECODE_INVAL)
3991
  return ret
3992

    
3993

    
3994
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3995
  ret = None
3996
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3997
      len(minmax_ispecs[0]) == 1):
3998
    for (key, spec) in minmax_ispecs[0].items():
3999
      # This loop is executed exactly once
4000
      if key in allowed_values and not spec:
4001
        ret = key
4002
  return ret
4003

    
4004

    
4005
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4006
                            group_ipolicy, allowed_values):
4007
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4008
  if found_allowed is not None:
4009
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4010
  elif minmax_ispecs is not None:
4011
    minmax_out = []
4012
    for mmpair in minmax_ispecs:
4013
      mmpair_out = {}
4014
      for (key, spec) in mmpair.items():
4015
        if key not in constants.ISPECS_MINMAX_KEYS:
4016
          msg = "Invalid key in bounds instance specifications: %s" % key
4017
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4018
        mmpair_out[key] = _ParseISpec(spec, key, True)
4019
      minmax_out.append(mmpair_out)
4020
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4021
  if std_ispecs is not None:
4022
    assert not group_ipolicy # This is not an option for gnt-group
4023
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4024

    
4025

    
4026
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4027
                          ispecs_cpu_count=None,
4028
                          ispecs_disk_count=None,
4029
                          ispecs_disk_size=None,
4030
                          ispecs_nic_count=None,
4031
                          minmax_ispecs=None,
4032
                          std_ispecs=None,
4033
                          ipolicy_disk_templates=None,
4034
                          ipolicy_vcpu_ratio=None,
4035
                          ipolicy_spindle_ratio=None,
4036
                          group_ipolicy=False,
4037
                          allowed_values=None,
4038
                          fill_all=False):
4039
  """Creation of instance policy based on command line options.
4040

4041
  @param fill_all: whether for cluster policies we should ensure that
4042
    all values are filled
4043

4044
  """
4045
  assert not (fill_all and allowed_values)
4046

    
4047
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4048
                 ispecs_disk_size or ispecs_nic_count)
4049
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4050
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4051
                               " together with any --ipolicy-xxx-specs option",
4052
                               errors.ECODE_INVAL)
4053

    
4054
  ipolicy_out = objects.MakeEmptyIPolicy()
4055
  if split_specs:
4056
    assert fill_all
4057
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4058
                             ispecs_disk_count, ispecs_disk_size,
4059
                             ispecs_nic_count, group_ipolicy, fill_all)
4060
  elif (minmax_ispecs is not None or std_ispecs is not None):
4061
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4062
                            group_ipolicy, allowed_values)
4063

    
4064
  if ipolicy_disk_templates is not None:
4065
    if allowed_values and ipolicy_disk_templates in allowed_values:
4066
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4067
    else:
4068
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4069
  if ipolicy_vcpu_ratio is not None:
4070
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4071
  if ipolicy_spindle_ratio is not None:
4072
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4073

    
4074
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4075

    
4076
  if not group_ipolicy and fill_all:
4077
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4078

    
4079
  return ipolicy_out
4080

    
4081

    
4082
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4083
  """Formatting core of L{PrintGenericInfo}.
4084

4085
  @param buf: (string) stream to accumulate the result into
4086
  @param data: data to format
4087
  @type level: int
4088
  @param level: depth in the data hierarchy, used for indenting
4089
  @type afterkey: bool
4090
  @param afterkey: True when we are in the middle of a line after a key (used
4091
      to properly add newlines or indentation)
4092

4093
  """
4094
  baseind = "  "
4095
  if isinstance(data, dict):
4096
    if not data:
4097
      buf.write("\n")
4098
    else:
4099
      if afterkey:
4100
        buf.write("\n")
4101
        doindent = True
4102
      else:
4103
        doindent = False
4104
      for key in sorted(data):
4105
        if doindent:
4106
          buf.write(baseind * level)
4107
        else:
4108
          doindent = True
4109
        buf.write(key)
4110
        buf.write(": ")
4111
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4112
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4113
    # list of tuples (an ordered dictionary)
4114
    if afterkey:
4115
      buf.write("\n")
4116
      doindent = True
4117
    else:
4118
      doindent = False
4119
    for (key, val) in data:
4120
      if doindent:
4121
        buf.write(baseind * level)
4122
      else:
4123
        doindent = True
4124
      buf.write(key)
4125
      buf.write(": ")
4126
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4127
  elif isinstance(data, list):
4128
    if not data:
4129
      buf.write("\n")
4130
    else:
4131
      if afterkey:
4132
        buf.write("\n")
4133
        doindent = True
4134
      else:
4135
        doindent = False
4136
      for item in data:
4137
        if doindent:
4138
          buf.write(baseind * level)
4139
        else:
4140
          doindent = True
4141
        buf.write("-")
4142
        buf.write(baseind[1:])
4143
        _SerializeGenericInfo(buf, item, level + 1)
4144
  else:
4145
    # This branch should be only taken for strings, but it's practically
4146
    # impossible to guarantee that no other types are produced somewhere
4147
    buf.write(str(data))
4148
    buf.write("\n")
4149

    
4150

    
4151
def PrintGenericInfo(data):
4152
  """Print information formatted according to the hierarchy.
4153

4154
  The output is a valid YAML string.
4155

4156
  @param data: the data to print. It's a hierarchical structure whose elements
4157
      can be:
4158
        - dictionaries, where keys are strings and values are of any of the
4159
          types listed here
4160
        - lists of pairs (key, value), where key is a string and value is of
4161
          any of the types listed here; it's a way to encode ordered
4162
          dictionaries
4163
        - lists of any of the types listed here
4164
        - strings
4165

4166
  """
4167
  buf = StringIO()
4168
  _SerializeGenericInfo(buf, data, 0)
4169
  ToStdout(buf.getvalue().rstrip("\n"))