root / lib / cli.py @ ea9d3b40


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


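# Editorial note (illustrative, not part of the original module): for a
# hypothetical invocation such as "gnt-instance add-tags web1 role:frontend",
# opts.tag_type would be constants.TAG_INSTANCE and args ["web1",
# "role:frontend"], so _ExtractTagsObject above returns
# (constants.TAG_INSTANCE, "web1") and leaves args as ["role:frontend"].
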
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported tag object types (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported tag object types (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  supported tag object types (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """Custom optparse converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


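# Editorial note (illustrative, not part of the original module): check_unit
# above hands the raw option string to utils.ParseUnit, which normalizes
# sizes to mebibytes, so a hypothetical "--os-size 4G" would yield 4096 while
# a plain "--os-size 512" stays 512; malformed values surface as
# OptionValueError.
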
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict


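# Editorial examples (illustrative, not part of the original module) for
# _SplitKeyVal above, using hypothetical option values:
#   >>> _SplitKeyVal("-B", "memory=512,auto_balance", True)
#   {'memory': '512', 'auto_balance': True}
#   >>> _SplitKeyVal("-B", "no_auto_balance,-memory", True)
#   {'auto_balance': False, 'memory': None}
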
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_list_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val" options.

  @rtype: dict of dicts
  @return: {ident: {key: val, key: val}, ident: {key: val}}

  """
  return _SplitListKeyVal(opt, value)


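# Editorial example (illustrative, not part of the original module) for
# check_list_ident_key_val above, as used by "listidentkeyval" options such
# as --ipolicy-bounds-specs (hypothetical keys and values):
#   >>> check_list_ident_key_val(None, "--ipolicy-bounds-specs",
#   ...                          "min:memory-size=128/max:memory-size=4096")
#   {'min': {'memory-size': '128'}, 'max': {'memory-size': '4096'}}
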
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "listidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["listidentkeyval"] = check_list_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="listidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))
1436

    
1437
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1438
                              action="store", dest="reserved_lvs",
1439
                              help=("A comma-separated list of reserved"
1440
                                    " logical volumes names, that will be"
1441
                                    " ignored by cluster verify"))
1442

    
1443
ROMAN_OPT = cli_option("--roman",
1444
                       dest="roman_integers", default=False,
1445
                       action="store_true",
1446
                       help="Use roman numbers for positive integers")
1447

    
1448
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1449
                             action="store", default=None,
1450
                             help="Specifies usermode helper for DRBD")
1451

    
1452
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1453
                                action="store_false", default=True,
1454
                                help="Disable support for DRBD")
1455

    
1456
PRIMARY_IP_VERSION_OPT = \
1457
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1458
               action="store", dest="primary_ip_version",
1459
               metavar="%d|%d" % (constants.IP4_VERSION,
1460
                                  constants.IP6_VERSION),
1461
               help="Cluster-wide IP version for primary IP")
1462

    
1463
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1464
                              action="store_true",
1465
                              help="Show machine name for every line in output")
1466

    
1467
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1468
                              action="store_true",
1469
                              help=("Hide successful results and show failures"
1470
                                    " only (determined by the exit code)"))
1471

    
1472
REASON_OPT = cli_option("--reason", default=None,
1473
                        help="The reason for executing the command")
1474

    
1475

    
1476
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# Common options for creating instances; the "add" and "import" commands
# then add their own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]


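# Illustrative sketch (not part of this module): a command definition in a
# gnt-* client combines one of the option lists above with a handler
# function, an argument specification, a usage string and a description.
# The handler and argument class names below are assumptions for the example:
#
#   commands = {
#     "add": (AddInstance, [ArgHost(min=1, max=1)],
#             COMMON_CREATE_OPTS, "[opts...] <name>",
#             "Creates a new instance"),
#   }
#
# COMMON_OPTS is appended automatically by _ParseArgs, so DEBUG_OPT and
# REASON_OPT do not need to be listed per command.

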
class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


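# Illustrative note (sketch only): when a command is listed in env_override,
# default arguments may be supplied through an environment variable derived
# from the binary and command name; e.g. for "gnt-instance list" the variable
# would be GNT_INSTANCE_LIST:
#
#   GNT_INSTANCE_LIST="-o name,status" gnt-instance list
#
# The value is split with shlex.split() and inserted right after the command
# word, before any arguments given on the command line itself.

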
def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


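# Illustrative sketch (assumed, simplified argument objects): _CheckArguments
# only relies on the "min" and "max" attributes of each argument definition,
# so its behaviour can be pictured with a plain namedtuple:
#
#   _Arg = collections.namedtuple("_Arg", ["min", "max"])
#   _CheckArguments("ex", [_Arg(min=1, max=1)], ["inst1"])        # -> True
#   _CheckArguments("ex", [_Arg(min=1, max=1)], [])               # -> False
#   _CheckArguments("ex", [_Arg(min=1, max=None)], ["a", "b"])    # -> True
#
# Only the last argument definition may use max=None (unlimited values).

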
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)


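# Example behaviour (sketch): the value is split on the first colon only.
# Note the asymmetry of the return types (list vs. tuple):
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)

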
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]


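# Example behaviour (sketch):
#
#   >>> CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#   ['debootstrap+wheezy', 'debootstrap+jessie']
#   >>> CalculateOSNames("debootstrap", None)
#   ['debootstrap']

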
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


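# Example behaviour (sketch): a leading "+" extends the default field list
# instead of replacing it:
#
#   >>> ParseFields(None, ["name", "status"])
#   ['name', 'status']
#   >>> ParseFields("+oper_ram", ["name", "status"])
#   ['name', 'status', 'oper_ram']
#   >>> ParseFields("name,oper_ram", ["name", "status"])
#   ['name', 'oper_ram']

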
UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


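# Illustrative usage sketch (assumed caller code, not part of this module):
# a destructive command would typically guard itself like this, relying on
# the fact that without a tty the last choice ("n", i.e. False) is returned:
#
#   usertext = "This will remove the instance. Continue?"
#   if not opts.force and not AskUser(usertext):
#     return constants.EXIT_FAILURE
#
# Custom choices use (input_char, return_value, description) tuples, e.g.
# [("y", True, "Proceed"), ("n", False, "Abort")].

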
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
1983
  """Generic job-polling function.
1984

1985
  @type job_id: number
1986
  @param job_id: Job ID
1987
  @type cbs: Instance of L{JobPollCbBase}
1988
  @param cbs: Data callbacks
1989
  @type report_cbs: Instance of L{JobPollReportCbBase}
1990
  @param report_cbs: Reporting callbacks
1991

1992
  """
1993
  prev_job_info = None
1994
  prev_logmsg_serial = None
1995

    
1996
  status = None
1997

    
1998
  while True:
1999
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2000
                                      prev_logmsg_serial)
2001
    if not result:
2002
      # job not found, go away!
2003
      raise errors.JobLost("Job with id %s lost" % job_id)
2004

    
2005
    if result == constants.JOB_NOTCHANGED:
2006
      report_cbs.ReportNotChanged(job_id, status)
2007

    
2008
      # Wait again
2009
      continue
2010

    
2011
    # Split result, a tuple of (field values, log entries)
2012
    (job_info, log_entries) = result
2013
    (status, ) = job_info
2014

    
2015
    if log_entries:
2016
      for log_entry in log_entries:
2017
        (serial, timestamp, log_type, message) = log_entry
2018
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2019
                                    log_type, message)
2020
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2021

    
2022
    # TODO: Handle canceled and archived jobs
2023
    elif status in (constants.JOB_STATUS_SUCCESS,
2024
                    constants.JOB_STATUS_ERROR,
2025
                    constants.JOB_STATUS_CANCELING,
2026
                    constants.JOB_STATUS_CANCELED):
2027
      break
2028

    
2029
    prev_job_info = job_info
2030

    
2031
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2032
  if not jobs:
2033
    raise errors.JobLost("Job with id %s lost" % job_id)
2034

    
2035
  status, opstatus, result = jobs[0]
2036

    
2037
  if status == constants.JOB_STATUS_SUCCESS:
2038
    return result
2039

    
2040
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2041
    raise errors.OpExecError("Job was canceled")
2042

    
2043
  has_ok = False
2044
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2045
    if status == constants.OP_STATUS_SUCCESS:
2046
      has_ok = True
2047
    elif status == constants.OP_STATUS_ERROR:
2048
      errors.MaybeRaise(msg)
2049

    
2050
      if has_ok:
2051
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2052
                                 (idx, msg))
2053

    
2054
      raise errors.OpExecError(str(msg))
2055

    
2056
  # default failure mode
2057
  raise errors.OpExecError(result)
2058

    
2059

    
2060
class JobPollCbBase:
2061
  """Base class for L{GenericPollJob} callbacks.
2062

2063
  """
2064
  def __init__(self):
2065
    """Initializes this class.
2066

2067
    """
2068

    
2069
  def WaitForJobChangeOnce(self, job_id, fields,
2070
                           prev_job_info, prev_log_serial):
2071
    """Waits for changes on a job.
2072

2073
    """
2074
    raise NotImplementedError()
2075

    
2076
  def QueryJobs(self, job_ids, fields):
2077
    """Returns the selected fields for the selected job IDs.
2078

2079
    @type job_ids: list of numbers
2080
    @param job_ids: Job IDs
2081
    @type fields: list of strings
2082
    @param fields: Fields
2083

2084
    """
2085
    raise NotImplementedError()
2086

    
2087

    
2088
class JobPollReportCbBase:
2089
  """Base class for L{GenericPollJob} reporting callbacks.
2090

2091
  """
2092
  def __init__(self):
2093
    """Initializes this class.
2094

2095
    """
2096

    
2097
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2098
    """Handles a log message.
2099

2100
    """
2101
    raise NotImplementedError()
2102

    
2103
  def ReportNotChanged(self, job_id, status):
2104
    """Called for if a job hasn't changed in a while.
2105

2106
    @type job_id: number
2107
    @param job_id: Job ID
2108
    @type status: string or None
2109
    @param status: Job status if available
2110

2111
    """
2112
    raise NotImplementedError()
2113

    
2114

    
2115
class _LuxiJobPollCb(JobPollCbBase):
2116
  def __init__(self, cl):
2117
    """Initializes this class.
2118

2119
    """
2120
    JobPollCbBase.__init__(self)
2121
    self.cl = cl
2122

    
2123
  def WaitForJobChangeOnce(self, job_id, fields,
2124
                           prev_job_info, prev_log_serial):
2125
    """Waits for changes on a job.
2126

2127
    """
2128
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2129
                                        prev_job_info, prev_log_serial)
2130

    
2131
  def QueryJobs(self, job_ids, fields):
2132
    """Returns the selected fields for the selected job IDs.
2133

2134
    """
2135
    return self.cl.QueryJobs(job_ids, fields)
2136

    
2137

    
2138
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2139
  def __init__(self, feedback_fn):
2140
    """Initializes this class.
2141

2142
    """
2143
    JobPollReportCbBase.__init__(self)
2144

    
2145
    self.feedback_fn = feedback_fn
2146

    
2147
    assert callable(feedback_fn)
2148

    
2149
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2150
    """Handles a log message.
2151

2152
    """
2153
    self.feedback_fn((timestamp, log_type, log_msg))
2154

    
2155
  def ReportNotChanged(self, job_id, status):
2156
    """Called if a job hasn't changed in a while.
2157

2158
    """
2159
    # Ignore
2160

    
2161

    
2162
class StdioJobPollReportCb(JobPollReportCbBase):
2163
  def __init__(self):
2164
    """Initializes this class.
2165

2166
    """
2167
    JobPollReportCbBase.__init__(self)
2168

    
2169
    self.notified_queued = False
2170
    self.notified_waitlock = False
2171

    
2172
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2173
    """Handles a log message.
2174

2175
    """
2176
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2177
             FormatLogMessage(log_type, log_msg))
2178

    
2179
  def ReportNotChanged(self, job_id, status):
2180
    """Called if a job hasn't changed in a while.
2181

2182
    """
2183
    if status is None:
2184
      return
2185

    
2186
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2187
      ToStderr("Job %s is waiting in queue", job_id)
2188
      self.notified_queued = True
2189

    
2190
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2191
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2192
      self.notified_waitlock = True
2193

    
2194

    
2195
def FormatLogMessage(log_type, log_msg):
2196
  """Formats a job message according to its type.
2197

2198
  """
2199
  if log_type != constants.ELOG_MESSAGE:
2200
    log_msg = str(log_msg)
2201

    
2202
  return utils.SafeEncode(log_msg)
2203

    
2204

    
2205
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2206
  """Function to poll for the result of a job.
2207

2208
  @type job_id: job identifier
2209
  @param job_id: the job to poll for results
2210
  @type cl: luxi.Client
2211
  @param cl: the luxi client to use for communicating with the master;
2212
             if None, a new client will be created
2213

2214
  """
2215
  if cl is None:
2216
    cl = GetClient()
2217

    
2218
  if reporter is None:
2219
    if feedback_fn:
2220
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2221
    else:
2222
      reporter = StdioJobPollReportCb()
2223
  elif feedback_fn:
2224
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2225

    
2226
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
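
# Illustrative note (sketch): the feedback_fn passed to PollJob receives the
# same (timestamp, log_type, log_msg) tuples that FeedbackFnJobPollReportCb
# forwards, so a minimal custom reporter could look like:
#
#   def _Feedback(msg):
#     (ts, log_type, text) = msg
#     ToStdout("%s %s", time.ctime(utils.MergeTime(ts)),
#              FormatLogMessage(log_type, text))
#
#   PollJob(job_id, feedback_fn=_Feedback)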
2227

    
2228

    
2229
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2230
  """Legacy function to submit an opcode.
2231

2232
  This is just a simple wrapper over the construction of the processor
2233
  instance. It should be extended to better handle feedback and
2234
  interaction functions.
2235

2236
  """
2237
  if cl is None:
2238
    cl = GetClient()
2239

    
2240
  SetGenericOpcodeOpts([op], opts)
2241

    
2242
  job_id = SendJob([op], cl=cl)
2243

    
2244
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2245
                       reporter=reporter)
2246

    
2247
  return op_results[0]
2248

    
2249

    
2250
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2251
  """Wrapper around SubmitOpCode or SendJob.
2252

2253
  This function will decide, based on the 'opts' parameter, whether to
2254
  submit and wait for the result of the opcode (and return it), or
2255
  whether to just send the job and print its identifier. It is used in
2256
  order to simplify the implementation of the '--submit' option.
2257

2258
  It will also process the opcodes if we're sending them via SendJob
2259
  (otherwise SubmitOpCode does it).
2260

2261
  """
2262
  if opts and opts.submit_only:
2263
    job = [op]
2264
    SetGenericOpcodeOpts(job, opts)
2265
    job_id = SendJob(job, cl=cl)
2266
    raise JobSubmittedException(job_id)
2267
  else:
2268
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2269

    
2270

    
2271
def _InitReasonTrail(op, opts):
2272
  """Builds the first part of the reason trail
2273

2274
  Builds the initial part of the reason trail, adding the user provided reason
2275
  (if it exists) and the name of the command starting the operation.
2276

2277
  @param op: the opcode the reason trail will be added to
2278
  @param opts: the command line options selected by the user
2279

2280
  """
2281
  assert len(sys.argv) >= 2
2282
  trail = []
2283

    
2284
  if opts.reason:
2285
    trail.append((constants.OPCODE_REASON_SRC_USER,
2286
                  opts.reason,
2287
                  utils.EpochNano()))
2288

    
2289
  binary = os.path.basename(sys.argv[0])
2290
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2291
  command = sys.argv[1]
2292
  trail.append((source, command, utils.EpochNano()))
2293
  op.reason = trail
2294

    
2295

    
2296
def SetGenericOpcodeOpts(opcode_list, options):
2297
  """Processor for generic options.
2298

2299
  This function updates the given opcodes based on generic command
2300
  line options (like debug, dry-run, etc.).
2301

2302
  @param opcode_list: list of opcodes
2303
  @param options: command line options or None
2304
  @return: None (in-place modification)
2305

2306
  """
2307
  if not options:
2308
    return
2309
  for op in opcode_list:
2310
    op.debug_level = options.debug
2311
    if hasattr(options, "dry_run"):
2312
      op.dry_run = options.dry_run
2313
    if getattr(options, "priority", None) is not None:
2314
      op.priority = options.priority
2315
    _InitReasonTrail(op, options)
2316

    
2317

    
2318
def GetClient(query=False):
2319
  """Connects to the a luxi socket and returns a client.
2320

2321
  @type query: boolean
2322
  @param query: this signifies that the client will only be
2323
      used for queries; if the build-time parameter
2324
      enable-split-queries is enabled, then the client will be
2325
      connected to the query socket instead of the masterd socket
2326

2327
  """
2328
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2329
  if override_socket:
2330
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2331
      address = pathutils.MASTER_SOCKET
2332
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2333
      address = pathutils.QUERY_SOCKET
2334
    else:
2335
      address = override_socket
2336
  elif query and constants.ENABLE_SPLIT_QUERY:
2337
    address = pathutils.QUERY_SOCKET
2338
  else:
2339
    address = None
2340
  # TODO: Cache object?
2341
  try:
2342
    client = luxi.Client(address=address)
2343
  except luxi.NoMasterError:
2344
    ss = ssconf.SimpleStore()
2345

    
2346
    # Try to read ssconf file
2347
    try:
2348
      ss.GetMasterNode()
2349
    except errors.ConfigurationError:
2350
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2351
                                 " not part of a cluster",
2352
                                 errors.ECODE_INVAL)
2353

    
2354
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2355
    if master != myself:
2356
      raise errors.OpPrereqError("This is not the master node, please connect"
2357
                                 " to node '%s' and rerun the command" %
2358
                                 master, errors.ECODE_INVAL)
2359
    raise
2360
  return client
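
# Illustrative note (sketch): the socket used by GetClient() can be forced
# through the environment variable named by constants.LUXI_OVERRIDE; setting
# it to constants.LUXI_OVERRIDE_QUERY selects the query socket regardless of
# the "query" argument, and any other value is taken as a socket path.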
2361

    
2362

    
2363
def FormatError(err):
2364
  """Return a formatted error message for a given error.
2365

2366
  This function takes an exception instance and returns a tuple
2367
  consisting of two values: first, the recommended exit code, and
2368
  second, a string describing the error message (not
2369
  newline-terminated).
2370

2371
  """
2372
  retcode = 1
2373
  obuf = StringIO()
2374
  msg = str(err)
2375
  if isinstance(err, errors.ConfigurationError):
2376
    txt = "Corrupt configuration file: %s" % msg
2377
    logging.error(txt)
2378
    obuf.write(txt + "\n")
2379
    obuf.write("Aborting.")
2380
    retcode = 2
2381
  elif isinstance(err, errors.HooksAbort):
2382
    obuf.write("Failure: hooks execution failed:\n")
2383
    for node, script, out in err.args[0]:
2384
      if out:
2385
        obuf.write("  node: %s, script: %s, output: %s\n" %
2386
                   (node, script, out))
2387
      else:
2388
        obuf.write("  node: %s, script: %s (no output)\n" %
2389
                   (node, script))
2390
  elif isinstance(err, errors.HooksFailure):
2391
    obuf.write("Failure: hooks general failure: %s" % msg)
2392
  elif isinstance(err, errors.ResolverError):
2393
    this_host = netutils.Hostname.GetSysName()
2394
    if err.args[0] == this_host:
2395
      msg = "Failure: can't resolve my own hostname ('%s')"
2396
    else:
2397
      msg = "Failure: can't resolve hostname '%s'"
2398
    obuf.write(msg % err.args[0])
2399
  elif isinstance(err, errors.OpPrereqError):
2400
    if len(err.args) == 2:
2401
      obuf.write("Failure: prerequisites not met for this"
2402
                 " operation:\nerror type: %s, error details:\n%s" %
2403
                 (err.args[1], err.args[0]))
2404
    else:
2405
      obuf.write("Failure: prerequisites not met for this"
2406
                 " operation:\n%s" % msg)
2407
  elif isinstance(err, errors.OpExecError):
2408
    obuf.write("Failure: command execution error:\n%s" % msg)
2409
  elif isinstance(err, errors.TagError):
2410
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2411
  elif isinstance(err, errors.JobQueueDrainError):
2412
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2413
               " accept new requests\n")
2414
  elif isinstance(err, errors.JobQueueFull):
2415
    obuf.write("Failure: the job queue is full and doesn't accept new"
2416
               " job submissions until old jobs are archived\n")
2417
  elif isinstance(err, errors.TypeEnforcementError):
2418
    obuf.write("Parameter Error: %s" % msg)
2419
  elif isinstance(err, errors.ParameterError):
2420
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2421
  elif isinstance(err, luxi.NoMasterError):
2422
    if err.args[0] == pathutils.MASTER_SOCKET:
2423
      daemon = "the master daemon"
2424
    elif err.args[0] == pathutils.QUERY_SOCKET:
2425
      daemon = "the config daemon"
2426
    else:
2427
      daemon = "socket '%s'" % str(err.args[0])
2428
    obuf.write("Cannot communicate with %s.\nIs the process running"
2429
               " and listening for connections?" % daemon)
2430
  elif isinstance(err, luxi.TimeoutError):
2431
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2432
               " been submitted and will continue to run even if the call"
2433
               " timed out. Useful commands in this situation are \"gnt-job"
2434
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2435
    obuf.write(msg)
2436
  elif isinstance(err, luxi.PermissionError):
2437
    obuf.write("It seems you don't have permissions to connect to the"
2438
               " master daemon.\nPlease retry as a different user.")
2439
  elif isinstance(err, luxi.ProtocolError):
2440
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2441
               "%s" % msg)
2442
  elif isinstance(err, errors.JobLost):
2443
    obuf.write("Error checking job status: %s" % msg)
2444
  elif isinstance(err, errors.QueryFilterParseError):
2445
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2446
    obuf.write("\n".join(err.GetDetails()))
2447
  elif isinstance(err, errors.GenericError):
2448
    obuf.write("Unhandled Ganeti error: %s" % msg)
2449
  elif isinstance(err, JobSubmittedException):
2450
    obuf.write("JobID: %s\n" % err.args[0])
2451
    retcode = 0
2452
  else:
2453
    obuf.write("Unhandled exception: %s" % msg)
2454
  return retcode, obuf.getvalue().rstrip("\n")
2455

    
2456

    
2457
def GenericMain(commands, override=None, aliases=None,
2458
                env_override=frozenset()):
2459
  """Generic main function for all the gnt-* commands.
2460

2461
  @param commands: a dictionary with a special structure, see the design doc
2462
                   for command line handling.
2463
  @param override: if not None, we expect a dictionary with keys that will
2464
                   override command line options; this can be used to pass
2465
                   options from the scripts to generic functions
2466
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2467
  @param env_override: list of environment names which are allowed to submit
2468
                       default args for commands
2469

2470
  """
2471
  # save the program name and the entire command line for later logging
2472
  if sys.argv:
2473
    binary = os.path.basename(sys.argv[0])
2474
    if not binary:
2475
      binary = sys.argv[0]
2476

    
2477
    if len(sys.argv) >= 2:
2478
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2479
    else:
2480
      logname = binary
2481

    
2482
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2483
  else:
2484
    binary = "<unknown program>"
2485
    cmdline = "<unknown>"
2486

    
2487
  if aliases is None:
2488
    aliases = {}
2489

    
2490
  try:
2491
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2492
                                       env_override)
2493
  except _ShowVersion:
2494
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2495
             constants.RELEASE_VERSION)
2496
    return constants.EXIT_SUCCESS
2497
  except _ShowUsage, err:
2498
    for line in _FormatUsage(binary, commands):
2499
      ToStdout(line)
2500

    
2501
    if err.exit_error:
2502
      return constants.EXIT_FAILURE
2503
    else:
2504
      return constants.EXIT_SUCCESS
2505
  except errors.ParameterError, err:
2506
    result, err_msg = FormatError(err)
2507
    ToStderr(err_msg)
2508
    return 1
2509

    
2510
  if func is None: # parse error
2511
    return 1
2512

    
2513
  if override is not None:
2514
    for key, val in override.iteritems():
2515
      setattr(options, key, val)
2516

    
2517
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2518
                     stderr_logging=True)
2519

    
2520
  logging.info("Command line: %s", cmdline)
2521

    
2522
  try:
2523
    result = func(options, args)
2524
  except (errors.GenericError, luxi.ProtocolError,
2525
          JobSubmittedException), err:
2526
    result, err_msg = FormatError(err)
2527
    logging.exception("Error during command processing")
2528
    ToStderr(err_msg)
2529
  except KeyboardInterrupt:
2530
    result = constants.EXIT_FAILURE
2531
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2532
             " might have been submitted and"
2533
             " will continue to run in the background.")
2534
  except IOError, err:
2535
    if err.errno == errno.EPIPE:
2536
      # our terminal went away, we'll exit
2537
      sys.exit(constants.EXIT_FAILURE)
2538
    else:
2539
      raise
2540

    
2541
  return result
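
# Illustrative sketch (assumed client script, not part of this module): a
# typical gnt-* tool builds its "commands" dictionary and then simply hands
# control over to GenericMain; the handler and argument names are examples:
#
#   commands = {
#     "info": (ShowInfo, ARGS_NONE, [], "", "Show cluster information"),
#   }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))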
2542

    
2543

    
2544
def ParseNicOption(optvalue):
2545
  """Parses the value of the --net option(s).
2546

2547
  """
2548
  try:
2549
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2550
  except (TypeError, ValueError), err:
2551
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2552
                               errors.ECODE_INVAL)
2553

    
2554
  nics = [{}] * nic_max
2555
  for nidx, ndict in optvalue:
2556
    nidx = int(nidx)
2557

    
2558
    if not isinstance(ndict, dict):
2559
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2560
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2561

    
2562
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2563

    
2564
    nics[nidx] = ndict
2565

    
2566
  return nics
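
# Example behaviour (sketch): NIC indices may be sparse; slots that are not
# mentioned are filled with empty dictionaries ("auto" NICs):
#
#   >>> ParseNicOption([("2", {"link": "br0"})])
#   [{}, {}, {'link': 'br0'}]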
2567

    
2568

    
2569
def GenericInstanceCreate(mode, opts, args):
2570
  """Add an instance to the cluster via either creation or import.
2571

2572
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2573
  @param opts: the command line options selected by the user
2574
  @type args: list
2575
  @param args: should contain only one element, the new instance name
2576
  @rtype: int
2577
  @return: the desired exit code
2578

2579
  """
2580
  instance = args[0]
2581

    
2582
  (pnode, snode) = SplitNodeOption(opts.node)
2583

    
2584
  hypervisor = None
2585
  hvparams = {}
2586
  if opts.hypervisor:
2587
    hypervisor, hvparams = opts.hypervisor
2588

    
2589
  if opts.nics:
2590
    nics = ParseNicOption(opts.nics)
2591
  elif opts.no_nics:
2592
    # no nics
2593
    nics = []
2594
  elif mode == constants.INSTANCE_CREATE:
2595
    # default of one nic, all auto
2596
    nics = [{}]
2597
  else:
2598
    # mode == import
2599
    nics = []
2600

    
2601
  if opts.disk_template == constants.DT_DISKLESS:
2602
    if opts.disks or opts.sd_size is not None:
2603
      raise errors.OpPrereqError("Diskless instance but disk"
2604
                                 " information passed", errors.ECODE_INVAL)
2605
    disks = []
2606
  else:
2607
    if (not opts.disks and not opts.sd_size
2608
        and mode == constants.INSTANCE_CREATE):
2609
      raise errors.OpPrereqError("No disk information specified",
2610
                                 errors.ECODE_INVAL)
2611
    if opts.disks and opts.sd_size is not None:
2612
      raise errors.OpPrereqError("Please use either the '--disk' or"
2613
                                 " '-s' option", errors.ECODE_INVAL)
2614
    if opts.sd_size is not None:
2615
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2616

    
2617
    if opts.disks:
2618
      try:
2619
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2620
      except ValueError, err:
2621
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2622
                                   errors.ECODE_INVAL)
2623
      disks = [{}] * disk_max
2624
    else:
2625
      disks = []
2626
    for didx, ddict in opts.disks:
2627
      didx = int(didx)
2628
      if not isinstance(ddict, dict):
2629
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2630
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2631
      elif constants.IDISK_SIZE in ddict:
2632
        if constants.IDISK_ADOPT in ddict:
2633
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2634
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2635
        try:
2636
          ddict[constants.IDISK_SIZE] = \
2637
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2638
        except ValueError, err:
2639
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2640
                                     (didx, err), errors.ECODE_INVAL)
2641
      elif constants.IDISK_ADOPT in ddict:
2642
        if mode == constants.INSTANCE_IMPORT:
2643
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2644
                                     " import", errors.ECODE_INVAL)
2645
        ddict[constants.IDISK_SIZE] = 0
2646
      else:
2647
        raise errors.OpPrereqError("Missing size or adoption source for"
2648
                                   " disk %d" % didx, errors.ECODE_INVAL)
2649
      disks[didx] = ddict
2650

    
2651
  if opts.tags is not None:
2652
    tags = opts.tags.split(",")
2653
  else:
2654
    tags = []
2655

    
2656
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2657
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2658

    
2659
  if mode == constants.INSTANCE_CREATE:
2660
    start = opts.start
2661
    os_type = opts.os
2662
    force_variant = opts.force_variant
2663
    src_node = None
2664
    src_path = None
2665
    no_install = opts.no_install
2666
    identify_defaults = False
2667
  elif mode == constants.INSTANCE_IMPORT:
2668
    start = False
2669
    os_type = None
2670
    force_variant = False
2671
    src_node = opts.src_node
2672
    src_path = opts.src_dir
2673
    no_install = None
2674
    identify_defaults = opts.identify_defaults
2675
  else:
2676
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2677

    
2678
  op = opcodes.OpInstanceCreate(instance_name=instance,
2679
                                disks=disks,
2680
                                disk_template=opts.disk_template,
2681
                                nics=nics,
2682
                                conflicts_check=opts.conflicts_check,
2683
                                pnode=pnode, snode=snode,
2684
                                ip_check=opts.ip_check,
2685
                                name_check=opts.name_check,
2686
                                wait_for_sync=opts.wait_for_sync,
2687
                                file_storage_dir=opts.file_storage_dir,
2688
                                file_driver=opts.file_driver,
2689
                                iallocator=opts.iallocator,
2690
                                hypervisor=hypervisor,
2691
                                hvparams=hvparams,
2692
                                beparams=opts.beparams,
2693
                                osparams=opts.osparams,
2694
                                mode=mode,
2695
                                start=start,
2696
                                os_type=os_type,
2697
                                force_variant=force_variant,
2698
                                src_node=src_node,
2699
                                src_path=src_path,
2700
                                tags=tags,
2701
                                no_install=no_install,
2702
                                identify_defaults=identify_defaults,
2703
                                ignore_ipolicy=opts.ignore_ipolicy)
2704

    
2705
  SubmitOrSend(op, opts)
2706
  return 0
2707

    
2708

    
2709
class _RunWhileClusterStoppedHelper:
2710
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2711

2712
  """
2713
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2714
    """Initializes this class.
2715

2716
    @type feedback_fn: callable
2717
    @param feedback_fn: Feedback function
2718
    @type cluster_name: string
2719
    @param cluster_name: Cluster name
2720
    @type master_node: string
2721
    @param master_node: Master node name
2722
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


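# Example (illustrative sketch, not part of the upstream module): a callback
# passed to RunWhileClusterStopped receives the helper instance as its first
# argument, so it can run commands on every online node while all daemons are
# down. The checked path is a hypothetical placeholder, and the private
# _RunCmd helper is used here for illustration only.
def _ExampleRunWhileClusterStopped(feedback_fn):
  def _CheckConfigPresent(helper, path):
    for node_name in helper.online_nodes:
      helper._RunCmd(node_name, ["test", "-e", path])
  return RunWhileClusterStopped(feedback_fn, _CheckConfigPresent,
                                "/var/lib/ganeti/config.data")

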
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


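# Example (illustrative sketch, not part of the upstream module): rendering a
# small two-column listing with GenerateTable; the field names and data below
# are hypothetical.
def _ExampleGenerateTable():
  headers = {"name": "Node", "mem": "Memory"}
  data = [["node1.example.com", 1024], ["node2.example.com", 2048]]
  # "mem" is both numeric (right-aligned) and unit-formatted; with
  # separator=None and units=None the human-readable unit ("h") is used
  return GenerateTable(headers, ["name", "mem"], None, data,
                       numfields=["mem"], unitfields=["mem"])

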
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


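# Example (illustrative sketch, not part of the upstream module): formatting a
# node query with FormatQueryResult and a per-field override; the override
# mirrors the (callback, align_right) layout of _DEFAULT_FORMAT_QUERY.
# Requires a running master daemon.
def _ExampleFormatQueryResult():
  cl = GetClient()
  response = cl.Query(constants.QR_NODE, ["name", "ctotal"], None)
  override = {"name": (lambda value: value.upper(), False)}
  (status, lines) = FormatQueryResult(response, header=True,
                                      format_override=override)
  for line in lines:
    ToStdout(line)
  return status

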
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


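# Example (illustrative sketch, not part of the upstream module): building a
# table by hand from TableColumn descriptions; the data is hypothetical.
def _ExampleFormatTable():
  columns = [
    TableColumn("Instance", str, False),
    TableColumn("Memory", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["web1.example.com", 512], ["db1.example.com", 4096]]
  # separator=None triggers the width-computing layout; a string (e.g. "|")
  # would instead join the formatted cells verbatim
  return FormatTable(rows, columns, True, None)

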
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


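# Example (illustrative sketch, not part of the upstream module): the values
# below follow directly from the suffix table above.
def _ExampleParseTimespec():
  assert ParseTimespec("30") == 30        # bare number: seconds
  assert ParseTimespec("15m") == 900      # 15 * 60
  assert ParseTimespec("2h") == 7200      # 2 * 3600
  assert ParseTimespec("1w") == 604800    # one week

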
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


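# Example (illustrative sketch, not part of the upstream module): collecting
# the secondary IPs of all online nodes except the master, e.g. for traffic
# over the replication network. Requires a running master daemon.
def _ExampleGetOnlineNodes():
  return GetOnlineNodes([], secondary_ips=True, filter_master=True)

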
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


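# Example (illustrative sketch, not part of the upstream module): queueing one
# job per instance and waiting for all of them; the opcode choice and instance
# names are hypothetical.
def _ExampleJobExecutor(opts):
  jex = JobExecutor(opts=opts, verbose=False)
  for instance in ["web1.example.com", "db1.example.com"]:
    op = opcodes.OpInstanceStartup(instance_name=instance)
    jex.QueueJob(instance, op)
  # GetResults() submits anything still pending and returns
  # (success, result) pairs in submission order
  return jex.GetResults()

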
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


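# Example (illustrative sketch, not part of the upstream module): parameters
# set explicitly keep their value, inherited ones are marked as defaults.
def _ExampleFormatParamsDictInfo():
  custom = {"kernel_path": "/boot/vmlinuz-custom"}
  actual = {"kernel_path": "/boot/vmlinuz-custom", "root_path": "/dev/xvda1"}
  # Returns {"kernel_path": "/boot/vmlinuz-custom",
  #          "root_path": "default (/dev/xvda1)"}
  return FormatParamsDictInfo(custom, actual)

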
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  ret = [
    (key,
     FormatParamsDictInfo(custom_minmax.get(key, {}),
                          eff_ipolicy[constants.ISPECS_MINMAX][key]))
    for key in constants.ISPECS_MINMAX_KEYS
    ]
  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("enabled disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmax = ipolicy.get("minmax")
  if minmax:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, allowed_values):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  for key in constants.ISPECS_MINMAX_KEYS:
    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret and ret[k] != constants.VALUE_DEFAULT:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, allowed_values):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES,
                      allowed_values=allowed_values)
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  if minmax_ispecs is not None:
    minmax_out = {}
    for (key, spec) in minmax_ispecs.items():
      if key not in constants.ISPECS_MINMAX_KEYS:
        msg = "Invalid key in bounds instance specifications: %s" % key
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      minmax_out[key] = _ParseISpec(spec, key, allowed_values)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std",
                                                    allowed_values)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled


  """
  if ((ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
       ispecs_disk_size or ispecs_nic_count) and
      (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if minmax_ispecs is None and std_ispecs is None:
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, allowed_values)
  else:
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


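# Example (illustrative sketch, not part of the upstream module): building a
# cluster-level policy from the "full" bounds-spec options, roughly as a
# cluster init/modify command might; the sizes and ratio are hypothetical.
def _ExampleCreateIPolicyFromOpts():
  minmax = {
    constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: "128M",
                           constants.ISPEC_CPU_COUNT: 1},
    constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: "32G",
                           constants.ISPEC_CPU_COUNT: 8},
    }
  return CreateIPolicyFromOpts(minmax_ispecs=minmax,
                               ipolicy_vcpu_ratio=4.0,
                               fill_all=True)

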
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
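

# Example (illustrative sketch, not part of the upstream module): the nested
# structure below prints as indented, YAML-like key/value output, with the
# list of pairs keeping its order.
def _ExamplePrintGenericInfo():
  PrintGenericInfo([
    ("cluster name", "cluster.example.com"),
    ("enabled hypervisors", ["kvm", "xen-pvm"]),
    ("default parameters", {"memory": "512M", "vcpus": "1"}),
    ])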