#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from ganeti.runtime import (GetClient)

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_GLUSTER_FILEDIR_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_PARAMS_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
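
# Illustrative note (not part of the upstream module): a command's positional
# arguments are described by a list of _Argument instances such as the ones
# above; min/max bound how many values each slot may consume. For example, a
# hypothetical command taking exactly one node followed by any number of
# instances could declare:
#
#   ARGS_ONE_NODE_MANY_INSTANCES = [ArgNode(min=1, max=1), ArgInstance()]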


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
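
# Illustrative example (assumed values, not part of the upstream module):
# given "--from mytags.txt" where mytags.txt contains one tag per line, e.g.
#
#   web
#   production
#
# _ExtendTags() appends ["web", "production"] to the positional args before
# the tag operation is built; "--from -" reads the same data from stdin.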


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
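
# Illustrative example (assumed values, not part of the upstream module):
#
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus", True)
#
# returns {"memory": "512", "auto_balance": False, "vcpus": None}: "key=val"
# pairs keep their string value, the "no_" prefix maps to False, the "-"
# prefix maps to None, and a bare key would map to True.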


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
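
# Illustrative example (assumed values, not part of the upstream module):
#
#   _SplitIdentKeyVal("-H", "kvm:kernel_path=/vmlinuz,acpi", True)
#
# returns ("kvm", {"kernel_path": "/vmlinuz", "acpi": True}), while a value
# of "no_kvm" (with parse_prefixes=True) returns ("kvm", False), requesting
# removal of the whole parameter group.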


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
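
# Illustrative example (assumed values, not part of the upstream module):
#
#   check_multilist_ident_key_val(None, "--ipolicy-bounds-specs",
#                                 "plain:stripes=2/drbd:metavg=xenvg"
#                                 "//plain:stripes=4")
#
# splits on "//" into two list entries, each mapping ident -> options:
#
#   [{"plain": {"stripes": "2"}, "drbd": {"metavg": "xenvg"}},
#    {"plain": {"stripes": "4"}}]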


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
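
# Illustrative example (not part of the upstream module): check_bool(None,
# "--offline", "YES") returns True, "no" returns False, and any other string
# raises errors.ParameterError.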


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
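
# Illustrative example (assumed values, not part of the upstream module):
# check_list(None, "--ipolicy-disk-templates", "drbd,plain") returns
# ["drbd", "plain"], while an empty string returns [] rather than [""].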


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
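
# Illustrative example (assumed, not part of the upstream module):
# check_maybefloat(None, "--ipolicy-vcpu-ratio", "4.0") returns 4.0, while
# the literal string constants.VALUE_DEFAULT ("default") is passed through
# unchanged so callers can explicitly request the cluster default.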


# completion_suggest is normally a list. Numeric values that don't evaluate
# to False are used to request dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
                                           dest="default_iallocator_params",
                                           help="iallocator template"
                                           " parameters, in the format"
                                           " template:option=value,"
                                           " option=value,...",
                                           type="keyval",
                                           default={})

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                 dest="ipolicy_vcpu_ratio",
                                 type="maybefloat", default=None,
                                 help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

    
1110
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1111
                         default=False, action="store_true",
1112
                         help="Instead of performing the migration/failover,"
1113
                         " try to recover from a failed cleanup. This is safe"
1114
                         " to run even if the instance is healthy, but it"
1115
                         " will create extra replication traffic and "
1116
                         " disrupt briefly the replication (like during the"
1117
                         " migration/failover")
1118

    
1119
STATIC_OPT = cli_option("-s", "--static", dest="static",
1120
                        action="store_true", default=False,
1121
                        help="Only show configuration data, not runtime data")
1122

    
1123
ALL_OPT = cli_option("--all", dest="show_all",
1124
                     default=False, action="store_true",
1125
                     help="Show info on all instances on the cluster."
1126
                     " This can take a long time to run, use wisely")
1127

    
1128
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1129
                           action="store_true", default=False,
1130
                           help="Interactive OS reinstall, lists available"
1131
                           " OS templates for selection")
1132

    
1133
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1134
                                 action="store_true", default=False,
1135
                                 help="Remove the instance from the cluster"
1136
                                 " configuration even if there are failures"
1137
                                 " during the removal process")
1138

    
1139
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1140
                                        dest="ignore_remove_failures",
1141
                                        action="store_true", default=False,
1142
                                        help="Remove the instance from the"
1143
                                        " cluster configuration even if there"
1144
                                        " are failures during the removal"
1145
                                        " process")
1146

    
1147
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1148
                                 action="store_true", default=False,
1149
                                 help="Remove the instance from the cluster")
1150

    
1151
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1152
                               help="Specifies the new node for the instance",
1153
                               metavar="NODE", default=None,
1154
                               completion_suggest=OPT_COMPL_ONE_NODE)
1155

    
1156
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1157
                               help="Specifies the new secondary node",
1158
                               metavar="NODE", default=None,
1159
                               completion_suggest=OPT_COMPL_ONE_NODE)
1160

    
1161
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1162
                             help="Specifies the new primary node",
1163
                             metavar="<node>", default=None,
1164
                             completion_suggest=OPT_COMPL_ONE_NODE)
1165

    
1166
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1167
                            default=False, action="store_true",
1168
                            help="Replace the disk(s) on the primary"
1169
                                 " node (applies only to internally mirrored"
1170
                                 " disk templates, e.g. %s)" %
1171
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1172

    
1173
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1174
                              default=False, action="store_true",
1175
                              help="Replace the disk(s) on the secondary"
1176
                                   " node (applies only to internally mirrored"
1177
                                   " disk templates, e.g. %s)" %
1178
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1179

    
1180
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1181
                              default=False, action="store_true",
1182
                              help="Lock all nodes and auto-promote as needed"
1183
                              " to MC status")
1184

    
1185
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1186
                              default=False, action="store_true",
1187
                              help="Automatically replace faulty disks"
1188
                                   " (applies only to internally mirrored"
1189
                                   " disk templates, e.g. %s)" %
1190
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1191

    
1192
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1193
                             default=False, action="store_true",
1194
                             help="Ignore current recorded size"
1195
                             " (useful for forcing activation when"
1196
                             " the recorded size is wrong)")
1197

    
1198
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1199
                          metavar="<node>",
1200
                          completion_suggest=OPT_COMPL_ONE_NODE)
1201

    
1202
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1203
                         metavar="<dir>")
1204

    
1205
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1206
                              help="Specify the secondary ip for the node",
1207
                              metavar="ADDRESS", default=None)
1208

    
1209
READD_OPT = cli_option("--readd", dest="readd",
1210
                       default=False, action="store_true",
1211
                       help="Readd old node after replacing it")
1212

    
1213
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1214
                                default=True, action="store_false",
1215
                                help="Disable SSH key fingerprint checking")
1216

    
1217
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1218
                                 default=False, action="store_true",
1219
                                 help="Force the joining of a node")
1220

    
1221
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1222
                    type="bool", default=None, metavar=_YORNO,
1223
                    help="Set the master_candidate flag on the node")
1224

    
1225
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1226
                         type="bool", default=None,
1227
                         help=("Set the offline flag on the node"
1228
                               " (cluster does not communicate with offline"
1229
                               " nodes)"))
1230

    
1231
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1232
                         type="bool", default=None,
1233
                         help=("Set the drained flag on the node"
1234
                               " (excluded from allocation operations)"))
1235

    
1236
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1237
                              type="bool", default=None, metavar=_YORNO,
1238
                              help="Set the master_capable flag on the node")
1239

    
1240
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1241
                          type="bool", default=None, metavar=_YORNO,
1242
                          help="Set the vm_capable flag on the node")
1243

    
1244
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1245
                             type="bool", default=None, metavar=_YORNO,
1246
                             help="Set the allocatable flag on a volume")
1247

    
1248
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1249
                            dest="enabled_hypervisors",
1250
                            help="Comma-separated list of hypervisors",
1251
                            type="string", default=None)
1252

    
1253
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1254
                                        dest="enabled_disk_templates",
1255
                                        help="Comma-separated list of "
1256
                                             "disk templates",
1257
                                        type="string", default=None)
1258

    
1259
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1260
                            type="keyval", default={},
1261
                            help="NIC parameters")
1262

    
1263
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1264
                         dest="candidate_pool_size", type="int",
1265
                         help="Set the candidate pool size")
1266

    
1267
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1268
                         help=("Enables LVM and specifies the volume group"
1269
                               " name (cluster-wide) for disk allocation"
1270
                               " [%s]" % constants.DEFAULT_VG),
1271
                         metavar="VG", default=None)
1272

    
1273
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1274
                          help="Destroy cluster", action="store_true")
1275

    
1276
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1277
                          help="Skip node agreement check (dangerous)",
1278
                          action="store_true", default=False)
1279

    
1280
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1281
                            help="Specify the mac prefix for the instance IP"
1282
                            " addresses, in the format XX:XX:XX",
1283
                            metavar="PREFIX",
1284
                            default=None)
1285

    
1286
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1287
                               help="Specify the node interface (cluster-wide)"
1288
                               " on which the master IP address will be added"
1289
                               " (cluster init default: %s)" %
1290
                               constants.DEFAULT_BRIDGE,
1291
                               metavar="NETDEV",
1292
                               default=None)
1293

    
1294
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1295
                                help="Specify the netmask of the master IP",
1296
                                metavar="NETMASK",
1297
                                default=None)
1298

    
1299
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1300
                                     dest="use_external_mip_script",
1301
                                     help="Specify whether to run a"
1302
                                     " user-provided script for the master"
1303
                                     " IP address turnup and"
1304
                                     " turndown operations",
1305
                                     type="bool", metavar=_YORNO, default=None)
1306

    
1307
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1308
                                help="Specify the default directory (cluster-"
1309
                                "wide) for storing the file-based disks [%s]" %
1310
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1311
                                metavar="DIR",
1312
                                default=None)
1313

    
1314
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1315
  "--shared-file-storage-dir",
1316
  dest="shared_file_storage_dir",
1317
  help="Specify the default directory (cluster-wide) for storing the"
1318
  " shared file-based disks [%s]" %
1319
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1320
  metavar="SHAREDDIR", default=None)
1321

    
1322
GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
1323
  "--gluster-storage-dir",
1324
  dest="gluster_storage_dir",
1325
  help="Specify the default directory (cluster-wide) for mounting Gluster"
1326
  " file systems [%s]" %
1327
  pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
1328
  metavar="GLUSTERDIR",
1329
  default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)
1330

    
1331
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1332
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1333
                                   action="store_false", default=True)
1334

    
1335
MODIFY_ETCHOSTS_OPT = \
1336
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1337
            default=None, type="bool",
1338
            help="Defines whether the cluster should autonomously modify"
1339
            " and keep in sync the /etc/hosts file of the nodes")
1340

    
1341
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1342
                                    help="Don't initialize SSH keys",
1343
                                    action="store_false", default=True)
1344

    
1345
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1346
                             help="Enable parseable error messages",
1347
                             action="store_true", default=False)
1348

    
1349
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1350
                          help="Skip N+1 memory redundancy tests",
1351
                          action="store_true", default=False)
1352

    
1353
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1354
                             help="Type of reboot: soft/hard/full",
1355
                             default=constants.INSTANCE_REBOOT_HARD,
1356
                             metavar="<REBOOT>",
1357
                             choices=list(constants.REBOOT_TYPES))
1358

    
1359
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1360
                                    dest="ignore_secondaries",
1361
                                    default=False, action="store_true",
1362
                                    help="Ignore errors from secondaries")
1363

    
1364
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1365
                            action="store_false", default=True,
1366
                            help="Don't shutdown the instance (unsafe)")
1367

    
1368
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1369
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1370
                         help="Maximum time to wait")
1371

    
1372
COMPRESS_OPT = cli_option("--compress", dest="compress",
1373
                          default=constants.IEC_NONE,
1374
                          help="The compression mode to use",
1375
                          choices=list(constants.IEC_ALL))
1376

    
1377
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1378
                                  dest="shutdown_timeout", type="int",
1379
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1380
                                  help="Maximum time to wait for instance"
1381
                                  " shutdown")
1382

    
1383
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1384
                          default=None,
1385
                          help=("Number of seconds between repetions of the"
1386
                                " command"))
1387

    
1388
EARLY_RELEASE_OPT = cli_option("--early-release",
1389
                               dest="early_release", default=False,
1390
                               action="store_true",
1391
                               help="Release the locks on the secondary"
1392
                               " node(s) early")
1393

    
1394
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1395
                                  dest="new_cluster_cert",
1396
                                  default=False, action="store_true",
1397
                                  help="Generate a new cluster certificate")
1398

    
1399
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1400
                           default=None,
1401
                           help="File containing new RAPI certificate")
1402

    
1403
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1404
                               default=None, action="store_true",
1405
                               help=("Generate a new self-signed RAPI"
1406
                                     " certificate"))
1407

    
1408
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1409
                            default=None,
1410
                            help="File containing new SPICE certificate")
1411

    
1412
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1413
                              default=None,
1414
                              help="File containing the certificate of the CA"
1415
                              " which signed the SPICE certificate")
1416

    
1417
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1418
                                dest="new_spice_cert", default=None,
1419
                                action="store_true",
1420
                                help=("Generate a new self-signed SPICE"
1421
                                      " certificate"))
1422

    
1423
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1424
                                    dest="new_confd_hmac_key",
1425
                                    default=False, action="store_true",
1426
                                    help=("Create a new HMAC key for %s" %
1427
                                          constants.CONFD))
1428

    
1429
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1430
                                       dest="cluster_domain_secret",
1431
                                       default=None,
1432
                                       help=("Load new new cluster domain"
1433
                                             " secret from file"))
1434

    
1435
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1436
                                           dest="new_cluster_domain_secret",
1437
                                           default=False, action="store_true",
1438
                                           help=("Create a new cluster domain"
1439
                                                 " secret"))
1440

    
1441
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1442
                              dest="use_replication_network",
1443
                              help="Whether to use the replication network"
1444
                              " for talking to the nodes",
1445
                              action="store_true", default=False)
1446

    
1447
MAINTAIN_NODE_HEALTH_OPT = \
1448
    cli_option("--maintain-node-health", dest="maintain_node_health",
1449
               metavar=_YORNO, default=None, type="bool",
1450
               help="Configure the cluster to automatically maintain node"
1451
               " health, by shutting down unknown instances, shutting down"
1452
               " unknown DRBD devices, etc.")
1453

    
1454
IDENTIFY_DEFAULTS_OPT = \
1455
    cli_option("--identify-defaults", dest="identify_defaults",
1456
               default=False, action="store_true",
1457
               help="Identify which saved instance parameters are equal to"
1458
               " the current cluster defaults and set them as such, instead"
1459
               " of marking them as overridden")
1460

    
1461
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1462
                         action="store", dest="uid_pool",
1463
                         help=("A list of user-ids or user-id"
1464
                               " ranges separated by commas"))
1465

    
1466
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1467
                          action="store", dest="add_uids",
1468
                          help=("A list of user-ids or user-id"
1469
                                " ranges separated by commas, to be"
1470
                                " added to the user-id pool"))
1471

    
1472
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1473
                             action="store", dest="remove_uids",
1474
                             help=("A list of user-ids or user-id"
1475
                                   " ranges separated by commas, to be"
1476
                                   " removed from the user-id pool"))
1477

    
1478
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1479
                              action="store", dest="reserved_lvs",
1480
                              help=("A comma-separated list of reserved"
1481
                                    " logical volumes names, that will be"
1482
                                    " ignored by cluster verify"))
1483

    
1484
ROMAN_OPT = cli_option("--roman",
1485
                       dest="roman_integers", default=False,
1486
                       action="store_true",
1487
                       help="Use roman numbers for positive integers")
1488

    
1489
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1490
                             action="store", default=None,
1491
                             help="Specifies usermode helper for DRBD")
1492

    
1493
PRIMARY_IP_VERSION_OPT = \
1494
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1495
               action="store", dest="primary_ip_version",
1496
               metavar="%d|%d" % (constants.IP4_VERSION,
1497
                                  constants.IP6_VERSION),
1498
               help="Cluster-wide IP version for primary IP")
1499

    
1500
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1501
                              action="store_true",
1502
                              help="Show machine name for every line in output")
1503

    
1504
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1505
                              action="store_true",
1506
                              help=("Hide successful results and show failures"
1507
                                    " only (determined by the exit code)"))
1508

    
1509
REASON_OPT = cli_option("--reason", default=None,
1510
                        help="The reason for executing the command")
1511

    
1512

    
1513
def _PriorityOptionCb(option, _, value, parser):
1514
  """Callback for processing C{--priority} option.
1515

1516
  """
1517
  value = _PRIONAME_TO_VALUE[value]
1518

    
1519
  setattr(parser.values, option.dest, value)
1520

    
1521

    
1522
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1523
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1524
                          choices=_PRIONAME_TO_VALUE.keys(),
1525
                          action="callback", type="choice",
1526
                          callback=_PriorityOptionCb,
1527
                          help="Priority for opcode processing")
1528

    
1529
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1530
                        type="bool", default=None, metavar=_YORNO,
1531
                        help="Sets the hidden flag on the OS")
1532

    
1533
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1534
                        type="bool", default=None, metavar=_YORNO,
1535
                        help="Sets the blacklisted flag on the OS")
1536

    
1537
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1538
                                     type="bool", metavar=_YORNO,
1539
                                     dest="prealloc_wipe_disks",
1540
                                     help=("Wipe disks prior to instance"
1541
                                           " creation"))
1542

    
1543
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1544
                             type="keyval", default=None,
1545
                             help="Node parameters")
1546

    
1547
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1548
                              action="store", metavar="POLICY", default=None,
1549
                              help="Allocation policy for the node group")
1550

    
1551
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1552
                              type="bool", metavar=_YORNO,
1553
                              dest="node_powered",
1554
                              help="Specify if the SoR for node is powered")
1555

    
1556
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1557
                             default=constants.OOB_TIMEOUT,
1558
                             help="Maximum time to wait for out-of-band helper")
1559

    
1560
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1561
                             default=constants.OOB_POWER_DELAY,
1562
                             help="Time in seconds to wait between power-ons")
1563

    
1564
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1565
                              action="store_true", default=False,
1566
                              help=("Whether command argument should be treated"
1567
                                    " as filter"))
1568

    
1569
NO_REMEMBER_OPT = cli_option("--no-remember",
1570
                             dest="no_remember",
1571
                             action="store_true", default=False,
1572
                             help="Perform but do not record the change"
1573
                             " in the configuration")
1574

    
1575
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1576
                              default=False, action="store_true",
1577
                              help="Evacuate primary instances only")
1578

    
1579
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1580
                                default=False, action="store_true",
1581
                                help="Evacuate secondary instances only"
1582
                                     " (applies only to internally mirrored"
1583
                                     " disk templates, e.g. %s)" %
1584
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1585

    
1586
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1587
                                action="store_true", default=False,
1588
                                help="Pause instance at startup")
1589

    
1590
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1591
                          help="Destination node group (name or uuid)",
1592
                          default=None, action="append",
1593
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1594

    
1595
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1596
                               action="append", dest="ignore_errors",
1597
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1598
                               help="Error code to be ignored")
1599

    
1600
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1601
                            action="append",
1602
                            help=("Specify disk state information in the"
1603
                                  " format"
1604
                                  " storage_type/identifier:option=value,...;"
1605
                                  " note this is unused for now"),
1606
                            type="identkeyval")
1607

    
1608
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1609
                          action="append",
1610
                          help=("Specify hypervisor state information in the"
1611
                                " format hypervisor:option=value,...;"
1612
                                " note this is unused for now"),
1613
                          type="identkeyval")
1614

    
1615
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1616
                                action="store_true", default=False,
1617
                                help="Ignore instance policy violations")
1618

    
1619
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1620
                             help="Sets the instance's runtime memory,"
1621
                             " ballooning it up or down to the new value",
1622
                             default=None, type="unit", metavar="<size>")
1623

    
1624
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1625
                          action="store_true", default=False,
1626
                          help="Marks the grow as absolute instead of the"
1627
                          " (default) relative mode")
1628

    
1629
NETWORK_OPT = cli_option("--network",
1630
                         action="store", default=None, dest="network",
1631
                         help="IP network in CIDR notation")
1632

    
1633
GATEWAY_OPT = cli_option("--gateway",
1634
                         action="store", default=None, dest="gateway",
1635
                         help="IP address of the router (gateway)")
1636

    
1637
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1638
                                  action="store", default=None,
1639
                                  dest="add_reserved_ips",
1640
                                  help="Comma-separated list of"
1641
                                  " reserved IPs to add")
1642

    
1643
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1644
                                     action="store", default=None,
1645
                                     dest="remove_reserved_ips",
1646
                                     help="Comma-delimited list of"
1647
                                     " reserved IPs to remove")
1648

    
1649
NETWORK6_OPT = cli_option("--network6",
1650
                          action="store", default=None, dest="network6",
1651
                          help="IP network in CIDR notation")
1652

    
1653
GATEWAY6_OPT = cli_option("--gateway6",
1654
                          action="store", default=None, dest="gateway6",
1655
                          help="IP6 address of the router (gateway)")
1656

    
1657
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1658
                                  dest="conflicts_check",
1659
                                  default=True,
1660
                                  action="store_false",
1661
                                  help="Don't check for conflicting IPs")
1662

    
1663
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1664
                                 default=False, action="store_true",
1665
                                 help="Include default values")
1666

    
1667
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1668
                         action="store_true", default=False,
1669
                         help="Hotplug supported devices (NICs and Disks)")
1670

    
1671
#: Options provided by all commands
1672
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1673

    
1674
# options related to asynchronous job handling
1675

    
1676
SUBMIT_OPTS = [
1677
  SUBMIT_OPT,
1678
  PRINT_JOBID_OPT,
1679
  ]
1680

    
1681
# common options for creating instances. add and import then add their own
1682
# specific ones.
1683
COMMON_CREATE_OPTS = [
1684
  BACKEND_OPT,
1685
  DISK_OPT,
1686
  DISK_TEMPLATE_OPT,
1687
  FILESTORE_DIR_OPT,
1688
  FILESTORE_DRIVER_OPT,
1689
  HYPERVISOR_OPT,
1690
  IALLOCATOR_OPT,
1691
  NET_OPT,
1692
  NODE_PLACEMENT_OPT,
1693
  NOIPCHECK_OPT,
1694
  NOCONFLICTSCHECK_OPT,
1695
  NONAMECHECK_OPT,
1696
  NONICS_OPT,
1697
  NWSYNC_OPT,
1698
  OSPARAMS_OPT,
1699
  OS_SIZE_OPT,
1700
  SUBMIT_OPT,
1701
  PRINT_JOBID_OPT,
1702
  TAG_ADD_OPT,
1703
  DRY_RUN_OPT,
1704
  PRIORITY_OPT,
1705
  ]
1706

    
1707
# common instance policy options
1708
INSTANCE_POLICY_OPTS = [
1709
  IPOLICY_BOUNDS_SPECS_OPT,
1710
  IPOLICY_DISK_TEMPLATES,
1711
  IPOLICY_VCPU_RATIO,
1712
  IPOLICY_SPINDLE_RATIO,
1713
  ]
1714

    
1715
# instance policy split specs options
1716
SPLIT_ISPECS_OPTS = [
1717
  SPECS_CPU_COUNT_OPT,
1718
  SPECS_DISK_COUNT_OPT,
1719
  SPECS_DISK_SIZE_OPT,
1720
  SPECS_MEM_SIZE_OPT,
1721
  SPECS_NIC_COUNT_OPT,
1722
  ]
1723

    
1724

    
1725
class _ShowUsage(Exception):
1726
  """Exception class for L{_ParseArgs}.
1727

1728
  """
1729
  def __init__(self, exit_error):
1730
    """Initializes instances of this class.
1731

1732
    @type exit_error: bool
1733
    @param exit_error: Whether to report failure on exit
1734

1735
    """
1736
    Exception.__init__(self)
1737
    self.exit_error = exit_error
1738

    
1739

    
1740
class _ShowVersion(Exception):
1741
  """Exception class for L{_ParseArgs}.
1742

1743
  """
1744

    
1745

    
1746
def _ParseArgs(binary, argv, commands, aliases, env_override):
1747
  """Parser for the command line arguments.
1748

1749
  This function parses the arguments and returns the function which
1750
  must be executed together with its (modified) arguments.
1751

1752
  @param binary: Script name
1753
  @param argv: Command line arguments
1754
  @param commands: Dictionary containing command definitions
1755
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1756
  @param env_override: list of env variables allowed for default args
1757
  @raise _ShowUsage: If usage description should be shown
1758
  @raise _ShowVersion: If version should be shown
1759

1760
  """
1761
  assert not (env_override - set(commands))
1762
  assert not (set(aliases.keys()) & set(commands.keys()))
1763

    
1764
  if len(argv) > 1:
1765
    cmd = argv[1]
1766
  else:
1767
    # No option or command given
1768
    raise _ShowUsage(exit_error=True)
1769

    
1770
  if cmd == "--version":
1771
    raise _ShowVersion()
1772
  elif cmd == "--help":
1773
    raise _ShowUsage(exit_error=False)
1774
  elif not (cmd in commands or cmd in aliases):
1775
    raise _ShowUsage(exit_error=True)
1776

    
1777
  # get command, unalias it, and look it up in commands
1778
  if cmd in aliases:
1779
    if aliases[cmd] not in commands:
1780
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1781
                                   " command '%s'" % (cmd, aliases[cmd]))
1782

    
1783
    cmd = aliases[cmd]
1784

    
1785
  if cmd in env_override:
1786
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1787
    env_args = os.environ.get(args_env_name)
1788
    if env_args:
1789
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1790

    
1791
  func, args_def, parser_opts, usage, description = commands[cmd]
1792
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1793
                        description=description,
1794
                        formatter=TitledHelpFormatter(),
1795
                        usage="%%prog %s %s" % (cmd, usage))
1796
  parser.disable_interspersed_args()
1797
  options, args = parser.parse_args(args=argv[2:])
1798

    
1799
  if not _CheckArguments(cmd, args_def, args):
1800
    return None, None, None
1801

    
1802
  return func, options, args
1803

    
1804

    
1805
def _FormatUsage(binary, commands):
1806
  """Generates a nice description of all commands.
1807

1808
  @param binary: Script name
1809
  @param commands: Dictionary containing command definitions
1810

1811
  """
1812
  # compute the max line length for cmd + usage
1813
  mlen = min(60, max(map(len, commands)))
1814

    
1815
  yield "Usage: %s {command} [options...] [argument...]" % binary
1816
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1817
  yield ""
1818
  yield "Commands:"
1819

    
1820
  # and format a nice command list
1821
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1822
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1823
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1824
    for line in help_lines:
1825
      yield " %-*s   %s" % (mlen, "", line)
1826

    
1827
  yield ""
1828

    
1829

    
1830
def _CheckArguments(cmd, args_def, args):
1831
  """Verifies the arguments using the argument definition.
1832

1833
  Algorithm:
1834

1835
    1. Abort with error if values specified by user but none expected.
1836

1837
    1. For each argument in definition
1838

1839
      1. Keep running count of minimum number of values (min_count)
1840
      1. Keep running count of maximum number of values (max_count)
1841
      1. If it has an unlimited number of values
1842

1843
        1. Abort with error if it's not the last argument in the definition
1844

1845
    1. If last argument has limited number of values
1846

1847
      1. Abort with error if number of values doesn't match or is too large
1848

1849
    1. Abort with error if user didn't pass enough values (min_count)
1850

1851
  """
1852
  if args and not args_def:
1853
    ToStderr("Error: Command %s expects no arguments", cmd)
1854
    return False
1855

    
1856
  min_count = None
1857
  max_count = None
1858
  check_max = None
1859

    
1860
  last_idx = len(args_def) - 1
1861

    
1862
  for idx, arg in enumerate(args_def):
1863
    if min_count is None:
1864
      min_count = arg.min
1865
    elif arg.min is not None:
1866
      min_count += arg.min
1867

    
1868
    if max_count is None:
1869
      max_count = arg.max
1870
    elif arg.max is not None:
1871
      max_count += arg.max
1872

    
1873
    if idx == last_idx:
1874
      check_max = (arg.max is not None)
1875

    
1876
    elif arg.max is None:
1877
      raise errors.ProgrammerError("Only the last argument can have max=None")
1878

    
1879
  if check_max:
1880
    # Command with exact number of arguments
1881
    if (min_count is not None and max_count is not None and
1882
        min_count == max_count and len(args) != min_count):
1883
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1884
      return False
1885

    
1886
    # Command with limited number of arguments
1887
    if max_count is not None and len(args) > max_count:
1888
      ToStderr("Error: Command %s expects only %d argument(s)",
1889
               cmd, max_count)
1890
      return False
1891

    
1892
  # Command with some required arguments
1893
  if min_count is not None and len(args) < min_count:
1894
    ToStderr("Error: Command %s expects at least %d argument(s)",
1895
             cmd, min_count)
1896
    return False
1897

    
1898
  return True
1899

    
1900

    
1901
def SplitNodeOption(value):
1902
  """Splits the value of a --node option.
1903

1904
  """
1905
  if value and ":" in value:
1906
    return value.split(":", 1)
1907
  else:
1908
    return (value, None)
1909

    
1910

    
1911
def CalculateOSNames(os_name, os_variants):
1912
  """Calculates all the names an OS can be called, according to its variants.
1913

1914
  @type os_name: string
1915
  @param os_name: base name of the os
1916
  @type os_variants: list or None
1917
  @param os_variants: list of supported variants
1918
  @rtype: list
1919
  @return: list of valid names
1920

1921
  """
1922
  if os_variants:
1923
    return ["%s+%s" % (os_name, v) for v in os_variants]
1924
  else:
1925
    return [os_name]
1926

    
1927

    
1928
def ParseFields(selected, default):
1929
  """Parses the values of "--field"-like options.
1930

1931
  @type selected: string or None
1932
  @param selected: User-selected options
1933
  @type default: list
1934
  @param default: Default fields
1935

1936
  """
1937
  if selected is None:
1938
    return default
1939

    
1940
  if selected.startswith("+"):
1941
    return default + selected[1:].split(",")
1942

    
1943
  return selected.split(",")
1944

    
1945

    
1946
UsesRPC = rpc.RunWithRPC
1947

    
1948

    
1949
def AskUser(text, choices=None):
1950
  """Ask the user a question.
1951

1952
  @param text: the question to ask
1953

1954
  @param choices: list with elements tuples (input_char, return_value,
1955
      description); if not given, it will default to: [('y', True,
1956
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1957
      note that the '?' char is reserved for help
1958

1959
  @return: one of the return values from the choices list; if input is
1960
      not possible (i.e. not running with a tty, we return the last
1961
      entry from the list
1962

1963
  """
1964
  if choices is None:
1965
    choices = [("y", True, "Perform the operation"),
1966
               ("n", False, "Do not perform the operation")]
1967
  if not choices or not isinstance(choices, list):
1968
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1969
  for entry in choices:
1970
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1971
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1972

    
1973
  answer = choices[-1][1]
1974
  new_text = []
1975
  for line in text.splitlines():
1976
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1977
  text = "\n".join(new_text)
1978
  try:
1979
    f = file("/dev/tty", "a+")
1980
  except IOError:
1981
    return answer
1982
  try:
1983
    chars = [entry[0] for entry in choices]
1984
    chars[-1] = "[%s]" % chars[-1]
1985
    chars.append("?")
1986
    maps = dict([(entry[0], entry[1]) for entry in choices])
1987
    while True:
1988
      f.write(text)
1989
      f.write("\n")
1990
      f.write("/".join(chars))
1991
      f.write(": ")
1992
      line = f.readline(2).strip().lower()
1993
      if line in maps:
1994
        answer = maps[line]
1995
        break
1996
      elif line == "?":
1997
        for entry in choices:
1998
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1999
        f.write("\n")
2000
        continue
2001
  finally:
2002
    f.close()
2003
  return answer
2004

    
2005

    
2006
class JobSubmittedException(Exception):
2007
  """Job was submitted, client should exit.
2008

2009
  This exception has one argument, the ID of the job that was
2010
  submitted. The handler should print this ID.
2011

2012
  This is not an error, just a structured way to exit from clients.
2013

2014
  """
2015

    
2016

    
2017
def SendJob(ops, cl=None):
2018
  """Function to submit an opcode without waiting for the results.
2019

2020
  @type ops: list
2021
  @param ops: list of opcodes
2022
  @type cl: luxi.Client
2023
  @param cl: the luxi client to use for communicating with the master;
2024
             if None, a new client will be created
2025

2026
  """
2027
  if cl is None:
2028
    cl = GetClient()
2029

    
2030
  job_id = cl.SubmitJob(ops)
2031

    
2032
  return job_id
2033

    
2034

    
2035
def GenericPollJob(job_id, cbs, report_cbs):
2036
  """Generic job-polling function.
2037

2038
  @type job_id: number
2039
  @param job_id: Job ID
2040
  @type cbs: Instance of L{JobPollCbBase}
2041
  @param cbs: Data callbacks
2042
  @type report_cbs: Instance of L{JobPollReportCbBase}
2043
  @param report_cbs: Reporting callbacks
2044

2045
  """
2046
  prev_job_info = None
2047
  prev_logmsg_serial = None
2048

    
2049
  status = None
2050

    
2051
  while True:
2052
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2053
                                      prev_logmsg_serial)
2054
    if not result:
2055
      # job not found, go away!
2056
      raise errors.JobLost("Job with id %s lost" % job_id)
2057

    
2058
    if result == constants.JOB_NOTCHANGED:
2059
      report_cbs.ReportNotChanged(job_id, status)
2060

    
2061
      # Wait again
2062
      continue
2063

    
2064
    # Split result, a tuple of (field values, log entries)
2065
    (job_info, log_entries) = result
2066
    (status, ) = job_info
2067

    
2068
    if log_entries:
2069
      for log_entry in log_entries:
2070
        (serial, timestamp, log_type, message) = log_entry
2071
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2072
                                    log_type, message)
2073
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2074

    
2075
    # TODO: Handle canceled and archived jobs
2076
    elif status in (constants.JOB_STATUS_SUCCESS,
2077
                    constants.JOB_STATUS_ERROR,
2078
                    constants.JOB_STATUS_CANCELING,
2079
                    constants.JOB_STATUS_CANCELED):
2080
      break
2081

    
2082
    prev_job_info = job_info
2083

    
2084
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2085
  if not jobs:
2086
    raise errors.JobLost("Job with id %s lost" % job_id)
2087

    
2088
  status, opstatus, result = jobs[0]
2089

    
2090
  if status == constants.JOB_STATUS_SUCCESS:
2091
    return result
2092

    
2093
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2094
    raise errors.OpExecError("Job was canceled")
2095

    
2096
  has_ok = False
2097
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2098
    if status == constants.OP_STATUS_SUCCESS:
2099
      has_ok = True
2100
    elif status == constants.OP_STATUS_ERROR:
2101
      errors.MaybeRaise(msg)
2102

    
2103
      if has_ok:
2104
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2105
                                 (idx, msg))
2106

    
2107
      raise errors.OpExecError(str(msg))
2108

    
2109
  # default failure mode
2110
  raise errors.OpExecError(result)
2111

    
2112

    
2113
class JobPollCbBase:
2114
  """Base class for L{GenericPollJob} callbacks.
2115

2116
  """
2117
  def __init__(self):
2118
    """Initializes this class.
2119

2120
    """
2121

    
2122
  def WaitForJobChangeOnce(self, job_id, fields,
2123
                           prev_job_info, prev_log_serial):
2124
    """Waits for changes on a job.
2125

2126
    """
2127
    raise NotImplementedError()
2128

    
2129
  def QueryJobs(self, job_ids, fields):
2130
    """Returns the selected fields for the selected job IDs.
2131

2132
    @type job_ids: list of numbers
2133
    @param job_ids: Job IDs
2134
    @type fields: list of strings
2135
    @param fields: Fields
2136

2137
    """
2138
    raise NotImplementedError()
2139

    
2140

    
2141
class JobPollReportCbBase:
2142
  """Base class for L{GenericPollJob} reporting callbacks.
2143

2144
  """
2145
  def __init__(self):
2146
    """Initializes this class.
2147

2148
    """
2149

    
2150
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2151
    """Handles a log message.
2152

2153
    """
2154
    raise NotImplementedError()
2155

    
2156
  def ReportNotChanged(self, job_id, status):
2157
    """Called for if a job hasn't changed in a while.
2158

2159
    @type job_id: number
2160
    @param job_id: Job ID
2161
    @type status: string or None
2162
    @param status: Job status if available
2163

2164
    """
2165
    raise NotImplementedError()
2166

    
2167

    
2168
class _LuxiJobPollCb(JobPollCbBase):
2169
  def __init__(self, cl):
2170
    """Initializes this class.
2171

2172
    """
2173
    JobPollCbBase.__init__(self)
2174
    self.cl = cl
2175

    
2176
  def WaitForJobChangeOnce(self, job_id, fields,
2177
                           prev_job_info, prev_log_serial):
2178
    """Waits for changes on a job.
2179

2180
    """
2181
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2182
                                        prev_job_info, prev_log_serial)
2183

    
2184
  def QueryJobs(self, job_ids, fields):
2185
    """Returns the selected fields for the selected job IDs.
2186

2187
    """
2188
    return self.cl.QueryJobs(job_ids, fields)
2189

    
2190

    
2191
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2192
  def __init__(self, feedback_fn):
2193
    """Initializes this class.
2194

2195
    """
2196
    JobPollReportCbBase.__init__(self)
2197

    
2198
    self.feedback_fn = feedback_fn
2199

    
2200
    assert callable(feedback_fn)
2201

    
2202
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2203
    """Handles a log message.
2204

2205
    """
2206
    self.feedback_fn((timestamp, log_type, log_msg))
2207

    
2208
  def ReportNotChanged(self, job_id, status):
2209
    """Called if a job hasn't changed in a while.
2210

2211
    """
2212
    # Ignore
2213

    
2214

    
2215
class StdioJobPollReportCb(JobPollReportCbBase):
2216
  def __init__(self):
2217
    """Initializes this class.
2218

2219
    """
2220
    JobPollReportCbBase.__init__(self)
2221

    
2222
    self.notified_queued = False
2223
    self.notified_waitlock = False
2224

    
2225
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2226
    """Handles a log message.
2227

2228
    """
2229
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2230
             FormatLogMessage(log_type, log_msg))
2231

    
2232
  def ReportNotChanged(self, job_id, status):
2233
    """Called if a job hasn't changed in a while.
2234

2235
    """
2236
    if status is None:
2237
      return
2238

    
2239
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2240
      ToStderr("Job %s is waiting in queue", job_id)
2241
      self.notified_queued = True
2242

    
2243
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2244
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2245
      self.notified_waitlock = True
2246

    
2247

    
2248
def FormatLogMessage(log_type, log_msg):
2249
  """Formats a job message according to its type.
2250

2251
  """
2252
  if log_type != constants.ELOG_MESSAGE:
2253
    log_msg = str(log_msg)
2254

    
2255
  return utils.SafeEncode(log_msg)
2256

    
2257

    
2258
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2259
  """Function to poll for the result of a job.
2260

2261
  @type job_id: job identified
2262
  @param job_id: the job to poll for results
2263
  @type cl: luxi.Client
2264
  @param cl: the luxi client to use for communicating with the master;
2265
             if None, a new client will be created
2266

2267
  """
2268
  if cl is None:
2269
    cl = GetClient()
2270

    
2271
  if reporter is None:
2272
    if feedback_fn:
2273
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2274
    else:
2275
      reporter = StdioJobPollReportCb()
2276
  elif feedback_fn:
2277
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2278

    
2279
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2280

    
2281

    
2282
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2283
  """Legacy function to submit an opcode.
2284

2285
  This is just a simple wrapper over the construction of the processor
2286
  instance. It should be extended to better handle feedback and
2287
  interaction functions.
2288

2289
  """
2290
  if cl is None:
2291
    cl = GetClient()
2292

    
2293
  SetGenericOpcodeOpts([op], opts)
2294

    
2295
  job_id = SendJob([op], cl=cl)
2296
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2297
    ToStdout("%d" % job_id)
2298

    
2299
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2300
                       reporter=reporter)
2301

    
2302
  return op_results[0]
2303

    
2304

    
2305
def SubmitOpCodeToDrainedQueue(op):
2306
  """Forcefully insert a job in the queue, even if it is drained.
2307

2308
  """
2309
  cl = GetClient()
2310
  job_id = cl.SubmitJobToDrainedQueue([op])
2311
  op_results = PollJob(job_id, cl=cl)
2312
  return op_results[0]
2313

    
2314

    
2315
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2316
  """Wrapper around SubmitOpCode or SendJob.
2317

2318
  This function will decide, based on the 'opts' parameter, whether to
2319
  submit and wait for the result of the opcode (and return it), or
2320
  whether to just send the job and print its identifier. It is used in
2321
  order to simplify the implementation of the '--submit' option.
2322

2323
  It will also process the opcodes if we're sending the via SendJob
2324
  (otherwise SubmitOpCode does it).
2325

2326
  """
2327
  if opts and opts.submit_only:
2328
    job = [op]
2329
    SetGenericOpcodeOpts(job, opts)
2330
    job_id = SendJob(job, cl=cl)
2331
    if opts.print_jobid:
2332
      ToStdout("%d" % job_id)
2333
    raise JobSubmittedException(job_id)
2334
  else:
2335
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2336

    
2337

    
2338
def _InitReasonTrail(op, opts):
2339
  """Builds the first part of the reason trail
2340

2341
  Builds the initial part of the reason trail, adding the user provided reason
2342
  (if it exists) and the name of the command starting the operation.
2343

2344
  @param op: the opcode the reason trail will be added to
2345
  @param opts: the command line options selected by the user
2346

2347
  """
2348
  assert len(sys.argv) >= 2
2349
  trail = []
2350

    
2351
  if opts.reason:
2352
    trail.append((constants.OPCODE_REASON_SRC_USER,
2353
                  opts.reason,
2354
                  utils.EpochNano()))
2355

    
2356
  binary = os.path.basename(sys.argv[0])
2357
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2358
  command = sys.argv[1]
2359
  trail.append((source, command, utils.EpochNano()))
2360
  op.reason = trail
2361

    
2362

    
2363
def SetGenericOpcodeOpts(opcode_list, options):
2364
  """Processor for generic options.
2365

2366
  This function updates the given opcodes based on generic command
2367
  line options (like debug, dry-run, etc.).
2368

2369
  @param opcode_list: list of opcodes
2370
  @param options: command line options or None
2371
  @return: None (in-place modification)
2372

2373
  """
2374
  if not options:
2375
    return
2376
  for op in opcode_list:
2377
    op.debug_level = options.debug
2378
    if hasattr(options, "dry_run"):
2379
      op.dry_run = options.dry_run
2380
    if getattr(options, "priority", None) is not None:
2381
      op.priority = options.priority
2382
    _InitReasonTrail(op, options)
2383

    
2384

    
2385
def FormatError(err):
2386
  """Return a formatted error message for a given error.
2387

2388
  This function takes an exception instance and returns a tuple
2389
  consisting of two values: first, the recommended exit code, and
2390
  second, a string describing the error message (not
2391
  newline-terminated).
2392

2393
  """
2394
  retcode = 1
2395
  obuf = StringIO()
2396
  msg = str(err)
2397
  if isinstance(err, errors.ConfigurationError):
2398
    txt = "Corrupt configuration file: %s" % msg
2399
    logging.error(txt)
2400
    obuf.write(txt + "\n")
2401
    obuf.write("Aborting.")
2402
    retcode = 2
2403
  elif isinstance(err, errors.HooksAbort):
2404
    obuf.write("Failure: hooks execution failed:\n")
2405
    for node, script, out in err.args[0]:
2406
      if out:
2407
        obuf.write("  node: %s, script: %s, output: %s\n" %
2408
                   (node, script, out))
2409
      else:
2410
        obuf.write("  node: %s, script: %s (no output)\n" %
2411
                   (node, script))
2412
  elif isinstance(err, errors.HooksFailure):
2413
    obuf.write("Failure: hooks general failure: %s" % msg)
2414
  elif isinstance(err, errors.ResolverError):
2415
    this_host = netutils.Hostname.GetSysName()
2416
    if err.args[0] == this_host:
2417
      msg = "Failure: can't resolve my own hostname ('%s')"
2418
    else:
2419
      msg = "Failure: can't resolve hostname '%s'"
2420
    obuf.write(msg % err.args[0])
2421
  elif isinstance(err, errors.OpPrereqError):
2422
    if len(err.args) == 2:
2423
      obuf.write("Failure: prerequisites not met for this"
2424
                 " operation:\nerror type: %s, error details:\n%s" %
2425
                 (err.args[1], err.args[0]))
2426
    else:
2427
      obuf.write("Failure: prerequisites not met for this"
2428
                 " operation:\n%s" % msg)
2429
  elif isinstance(err, errors.OpExecError):
2430
    obuf.write("Failure: command execution error:\n%s" % msg)
2431
  elif isinstance(err, errors.TagError):
2432
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2433
  elif isinstance(err, errors.JobQueueDrainError):
2434
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2435
               " accept new requests\n")
2436
  elif isinstance(err, errors.JobQueueFull):
2437
    obuf.write("Failure: the job queue is full and doesn't accept new"
2438
               " job submissions until old jobs are archived\n")
2439
  elif isinstance(err, errors.TypeEnforcementError):
2440
    obuf.write("Parameter Error: %s" % msg)
2441
  elif isinstance(err, errors.ParameterError):
2442
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2443
  elif isinstance(err, luxi.NoMasterError):
2444
    if err.args[0] == pathutils.MASTER_SOCKET:
2445
      daemon = "the master daemon"
2446
    elif err.args[0] == pathutils.QUERY_SOCKET:
2447
      daemon = "the config daemon"
2448
    else:
2449
      daemon = "socket '%s'" % str(err.args[0])
2450
    obuf.write("Cannot communicate with %s.\nIs the process running"
2451
               " and listening for connections?" % daemon)
2452
  elif isinstance(err, luxi.TimeoutError):
2453
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2454
               " been submitted and will continue to run even if the call"
2455
               " timed out. Useful commands in this situation are \"gnt-job"
2456
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2457
    obuf.write(msg)
2458
  elif isinstance(err, luxi.PermissionError):
2459
    obuf.write("It seems you don't have permissions to connect to the"
2460
               " master daemon.\nPlease retry as a different user.")
2461
  elif isinstance(err, luxi.ProtocolError):
2462
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2463
               "%s" % msg)
2464
  elif isinstance(err, errors.JobLost):
2465
    obuf.write("Error checking job status: %s" % msg)
2466
  elif isinstance(err, errors.QueryFilterParseError):
2467
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2468
    obuf.write("\n".join(err.GetDetails()))
2469
  elif isinstance(err, errors.GenericError):
2470
    obuf.write("Unhandled Ganeti error: %s" % msg)
2471
  elif isinstance(err, JobSubmittedException):
2472
    obuf.write("JobID: %s\n" % err.args[0])
2473
    retcode = 0
2474
  else:
2475
    obuf.write("Unhandled exception: %s" % msg)
2476
  return retcode, obuf.getvalue().rstrip("\n")
2477

    
2478

    
2479
def GenericMain(commands, override=None, aliases=None,
2480
                env_override=frozenset()):
2481
  """Generic main function for all the gnt-* commands.
2482

2483
  @param commands: a dictionary with a special structure, see the design doc
2484
                   for command line handling.
2485
  @param override: if not None, we expect a dictionary with keys that will
2486
                   override command line options; this can be used to pass
2487
                   options from the scripts to generic functions
2488
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2489
  @param env_override: list of environment names which are allowed to submit
2490
                       default args for commands
2491

2492
  """
2493
  # save the program name and the entire command line for later logging
2494
  if sys.argv:
2495
    binary = os.path.basename(sys.argv[0])
2496
    if not binary:
2497
      binary = sys.argv[0]
2498

    
2499
    if len(sys.argv) >= 2:
2500
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2501
    else:
2502
      logname = binary
2503

    
2504
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2505
  else:
2506
    binary = "<unknown program>"
2507
    cmdline = "<unknown>"
2508

    
2509
  if aliases is None:
2510
    aliases = {}
2511

    
2512
  try:
2513
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2514
                                       env_override)
2515
  except _ShowVersion:
2516
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2517
             constants.RELEASE_VERSION)
2518
    return constants.EXIT_SUCCESS
2519
  except _ShowUsage, err:
2520
    for line in _FormatUsage(binary, commands):
2521
      ToStdout(line)
2522

    
2523
    if err.exit_error:
2524
      return constants.EXIT_FAILURE
2525
    else:
2526
      return constants.EXIT_SUCCESS
2527
  except errors.ParameterError, err:
2528
    result, err_msg = FormatError(err)
2529
    ToStderr(err_msg)
2530
    return 1
2531

    
2532
  if func is None: # parse error
2533
    return 1
2534

    
2535
  if override is not None:
2536
    for key, val in override.iteritems():
2537
      setattr(options, key, val)
2538

    
2539
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2540
                     stderr_logging=True)
2541

    
2542
  logging.info("Command line: %s", cmdline)
2543

    
2544
  try:
2545
    result = func(options, args)
2546
  except (errors.GenericError, luxi.ProtocolError,
2547
          JobSubmittedException), err:
2548
    result, err_msg = FormatError(err)
2549
    logging.exception("Error during command processing")
2550
    ToStderr(err_msg)
2551
  except KeyboardInterrupt:
2552
    result = constants.EXIT_FAILURE
2553
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2554
             " might have been submitted and"
2555
             " will continue to run in the background.")
2556
  except IOError, err:
2557
    if err.errno == errno.EPIPE:
2558
      # our terminal went away, we'll exit
2559
      sys.exit(constants.EXIT_FAILURE)
2560
    else:
2561
      raise
2562

    
2563
  return result
2564

    
2565

    
2566
def ParseNicOption(optvalue):
2567
  """Parses the value of the --net option(s).
2568

2569
  """
2570
  try:
2571
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2572
  except (TypeError, ValueError), err:
2573
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2574
                               errors.ECODE_INVAL)
2575

    
2576
  nics = [{}] * nic_max
2577
  for nidx, ndict in optvalue:
2578
    nidx = int(nidx)
2579

    
2580
    if not isinstance(ndict, dict):
2581
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2582
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2583

    
2584
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2585

    
2586
    nics[nidx] = ndict
2587

    
2588
  return nics
2589

    
2590

    
2591
def GenericInstanceCreate(mode, opts, args):
2592
  """Add an instance to the cluster via either creation or import.
2593

2594
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2595
  @param opts: the command line options selected by the user
2596
  @type args: list
2597
  @param args: should contain only one element, the new instance name
2598
  @rtype: int
2599
  @return: the desired exit code
2600

2601
  """
2602
  instance = args[0]
2603

    
2604
  (pnode, snode) = SplitNodeOption(opts.node)
2605

    
2606
  hypervisor = None
2607
  hvparams = {}
2608
  if opts.hypervisor:
2609
    hypervisor, hvparams = opts.hypervisor
2610

    
2611
  if opts.nics:
2612
    nics = ParseNicOption(opts.nics)
2613
  elif opts.no_nics:
2614
    # no nics
2615
    nics = []
2616
  elif mode == constants.INSTANCE_CREATE:
2617
    # default of one nic, all auto
2618
    nics = [{}]
2619
  else:
2620
    # mode == import
2621
    nics = []
2622

    
2623
  if opts.disk_template == constants.DT_DISKLESS:
2624
    if opts.disks or opts.sd_size is not None:
2625
      raise errors.OpPrereqError("Diskless instance but disk"
2626
                                 " information passed", errors.ECODE_INVAL)
2627
    disks = []
2628
  else:
2629
    if (not opts.disks and not opts.sd_size
2630
        and mode == constants.INSTANCE_CREATE):
2631
      raise errors.OpPrereqError("No disk information specified",
2632
                                 errors.ECODE_INVAL)
2633
    if opts.disks and opts.sd_size is not None:
2634
      raise errors.OpPrereqError("Please use either the '--disk' or"
2635
                                 " '-s' option", errors.ECODE_INVAL)
2636
    if opts.sd_size is not None:
2637
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2638

    
2639
    if opts.disks:
2640
      try:
2641
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2642
      except ValueError, err:
2643
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2644
                                   errors.ECODE_INVAL)
2645
      disks = [{}] * disk_max
2646
    else:
2647
      disks = []
2648
    for didx, ddict in opts.disks:
2649
      didx = int(didx)
2650
      if not isinstance(ddict, dict):
2651
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2652
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2653
      elif constants.IDISK_SIZE in ddict:
2654
        if constants.IDISK_ADOPT in ddict:
2655
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2656
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2657
        try:
2658
          ddict[constants.IDISK_SIZE] = \
2659
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2660
        except ValueError, err:
2661
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2662
                                     (didx, err), errors.ECODE_INVAL)
2663
      elif constants.IDISK_ADOPT in ddict:
2664
        if constants.IDISK_SPINDLES in ddict:
2665
          raise errors.OpPrereqError("spindles is not a valid option when"
2666
                                     " adopting a disk", errors.ECODE_INVAL)
2667
        if mode == constants.INSTANCE_IMPORT:
2668
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2669
                                     " import", errors.ECODE_INVAL)
2670
        ddict[constants.IDISK_SIZE] = 0
2671
      else:
2672
        raise errors.OpPrereqError("Missing size or adoption source for"
2673
                                   " disk %d" % didx, errors.ECODE_INVAL)
2674
      if constants.IDISK_SPINDLES in ddict:
2675
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])
2676

    
2677
      disks[didx] = ddict
2678

    
2679
  if opts.tags is not None:
2680
    tags = opts.tags.split(",")
2681
  else:
2682
    tags = []
2683

    
2684
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2685
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2686

    
2687
  if mode == constants.INSTANCE_CREATE:
2688
    start = opts.start
2689
    os_type = opts.os
2690
    force_variant = opts.force_variant
2691
    src_node = None
2692
    src_path = None
2693
    no_install = opts.no_install
2694
    identify_defaults = False
2695
    compress = constants.IEC_NONE
2696
  elif mode == constants.INSTANCE_IMPORT:
2697
    start = False
2698
    os_type = None
2699
    force_variant = False
2700
    src_node = opts.src_node
2701
    src_path = opts.src_dir
2702
    no_install = None
2703
    identify_defaults = opts.identify_defaults
2704
    compress = opts.compress
2705
  else:
2706
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2707

    
2708
  op = opcodes.OpInstanceCreate(instance_name=instance,
2709
                                disks=disks,
2710
                                disk_template=opts.disk_template,
2711
                                nics=nics,
2712
                                conflicts_check=opts.conflicts_check,
2713
                                pnode=pnode, snode=snode,
2714
                                ip_check=opts.ip_check,
2715
                                name_check=opts.name_check,
2716
                                wait_for_sync=opts.wait_for_sync,
2717
                                file_storage_dir=opts.file_storage_dir,
2718
                                file_driver=opts.file_driver,
2719
                                iallocator=opts.iallocator,
2720
                                hypervisor=hypervisor,
2721
                                hvparams=hvparams,
2722
                                beparams=opts.beparams,
2723
                                osparams=opts.osparams,
2724
                                mode=mode,
2725
                                start=start,
2726
                                os_type=os_type,
2727
                                force_variant=force_variant,
2728
                                src_node=src_node,
2729
                                src_path=src_path,
2730
                                compress=compress,
2731
                                tags=tags,
2732
                                no_install=no_install,
2733
                                identify_defaults=identify_defaults,
2734
                                ignore_ipolicy=opts.ignore_ipolicy)
2735

    
2736
  SubmitOrSend(op, opts)
2737
  return 0
2738

    
2739

    
2740
class _RunWhileClusterStoppedHelper:
2741
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2742

2743
  """
2744
  def __init__(self, feedback_fn, cluster_name, master_node,
2745
               online_nodes, ssh_ports):
2746
    """Initializes this class.
2747

2748
    @type feedback_fn: callable
2749
    @param feedback_fn: Feedback function
2750
    @type cluster_name: string
2751
    @param cluster_name: Cluster name
2752
    @type master_node: string
2753
    @param master_node Master node name
2754
    @type online_nodes: list
2755
    @param online_nodes: List of names of online nodes
2756
    @type ssh_ports: list
2757
    @param ssh_ports: List of SSH ports of online nodes
2758

2759
    """
2760
    self.feedback_fn = feedback_fn
2761
    self.cluster_name = cluster_name
2762
    self.master_node = master_node
2763
    self.online_nodes = online_nodes
2764
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))
2765

    
2766
    self.ssh = ssh.SshRunner(self.cluster_name)
2767

    
2768
    self.nonmaster_nodes = [name for name in online_nodes
2769
                            if name != master_node]
2770

    
2771
    assert self.master_node not in self.nonmaster_nodes
2772

    
2773
  def _RunCmd(self, node_name, cmd):
2774
    """Runs a command on the local or a remote machine.
2775

2776
    @type node_name: string
2777
    @param node_name: Machine name
2778
    @type cmd: list
2779
    @param cmd: Command
2780

2781
    """
2782
    if node_name is None or node_name == self.master_node:
2783
      # No need to use SSH
2784
      result = utils.RunCmd(cmd)
2785
    else:
2786
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2787
                            utils.ShellQuoteArgs(cmd),
2788
                            port=self.ssh_ports[node_name])
2789

    
2790
    if result.failed:
2791
      errmsg = ["Failed to run command %s" % result.cmd]
2792
      if node_name:
2793
        errmsg.append("on node %s" % node_name)
2794
      errmsg.append(": exitcode %s and error %s" %
2795
                    (result.exit_code, result.output))
2796
      raise errors.OpExecError(" ".join(errmsg))
2797

    
2798
  def Call(self, fn, *args):
2799
    """Call function while all daemons are stopped.
2800

2801
    @type fn: callable
2802
    @param fn: Function to be called
2803

2804
    """
2805
    # Pause watcher by acquiring an exclusive lock on watcher state file
2806
    self.feedback_fn("Blocking watcher")
2807
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2808
    try:
2809
      # TODO: Currently, this just blocks. There's no timeout.
2810
      # TODO: Should it be a shared lock?
2811
      watcher_block.Exclusive(blocking=True)
2812

    
2813
      # Stop master daemons, so that no new jobs can come in and all running
2814
      # ones are finished
2815
      self.feedback_fn("Stopping master daemons")
2816
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2817
      try:
2818
        # Stop daemons on all nodes
2819
        for node_name in self.online_nodes:
2820
          self.feedback_fn("Stopping daemons on %s" % node_name)
2821
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2822

    
2823
        # All daemons are shut down now
2824
        try:
2825
          return fn(self, *args)
2826
        except Exception, err:
2827
          _, errmsg = FormatError(err)
2828
          logging.exception("Caught exception")
2829
          self.feedback_fn(errmsg)
2830
          raise
2831
      finally:
2832
        # Start cluster again, master node last
2833
        for node_name in self.nonmaster_nodes + [self.master_node]:
2834
          self.feedback_fn("Starting daemons on %s" % node_name)
2835
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2836
    finally:
2837
      # Resume watcher
2838
      watcher_block.Close()
2839

    
2840

    
2841
def RunWhileClusterStopped(feedback_fn, fn, *args):
2842
  """Calls a function while all cluster daemons are stopped.
2843

2844
  @type feedback_fn: callable
2845
  @param feedback_fn: Feedback function
2846
  @type fn: callable
2847
  @param fn: Function to be called when daemons are stopped
2848

2849
  """
2850
  feedback_fn("Gathering cluster information")
2851

    
2852
  # This ensures we're running on the master daemon
2853
  cl = GetClient()
2854
  # Query client
2855
  qcl = GetClient(query=True)
2856

    
2857
  (cluster_name, master_node) = \
2858
    cl.QueryConfigValues(["cluster_name", "master_node"])
2859

    
2860
  online_nodes = GetOnlineNodes([], cl=qcl)
2861
  ssh_ports = GetNodesSshPorts(online_nodes, qcl)
2862

    
2863
  # Don't keep a reference to the client. The master daemon will go away.
2864
  del cl
2865
  del qcl
2866

    
2867
  assert master_node in online_nodes
2868

    
2869
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2870
                                       online_nodes, ssh_ports).Call(fn, *args)
2871

    
2872

    
2873
def GenerateTable(headers, fields, separator, data,
2874
                  numfields=None, unitfields=None,
2875
                  units=None):
2876
  """Prints a table with headers and different fields.
2877

2878
  @type headers: dict
2879
  @param headers: dictionary mapping field names to headers for
2880
      the table
2881
  @type fields: list
2882
  @param fields: the field names corresponding to each row in
2883
      the data field
2884
  @param separator: the separator to be used; if this is None,
2885
      the default 'smart' algorithm is used which computes optimal
2886
      field width, otherwise just the separator is used between
2887
      each field
2888
  @type data: list
2889
  @param data: a list of lists, each sublist being one row to be output
2890
  @type numfields: list
2891
  @param numfields: a list with the fields that hold numeric
2892
      values and thus should be right-aligned
2893
  @type unitfields: list
2894
  @param unitfields: a list with the fields that hold numeric
2895
      values that should be formatted with the units field
2896
  @type units: string or None
2897
  @param units: the units we should use for formatting, or None for
2898
      automatic choice (human-readable for non-separator usage, otherwise
2899
      megabytes); this is a one-letter string
2900

2901
  """
2902
  if units is None:
2903
    if separator:
2904
      units = "m"
2905
    else:
2906
      units = "h"
2907

    
2908
  if numfields is None:
2909
    numfields = []
2910
  if unitfields is None:
2911
    unitfields = []
2912

    
2913
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2914
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2915

    
2916
  format_fields = []
2917
  for field in fields:
2918
    if headers and field not in headers:
2919
      # TODO: handle better unknown fields (either revert to old
2920
      # style of raising exception, or deal more intelligently with
2921
      # variable fields)
2922
      headers[field] = field
2923
    if separator is not None:
2924
      format_fields.append("%s")
2925
    elif numfields.Matches(field):
2926
      format_fields.append("%*s")
2927
    else:
2928
      format_fields.append("%-*s")
2929

    
2930
  if separator is None:
2931
    mlens = [0 for name in fields]
2932
    format_str = " ".join(format_fields)
2933
  else:
2934
    format_str = separator.replace("%", "%%").join(format_fields)
2935

    
2936
  for row in data:
2937
    if row is None:
2938
      continue
2939
    for idx, val in enumerate(row):
2940
      if unitfields.Matches(fields[idx]):
2941
        try:
2942
          val = int(val)
2943
        except (TypeError, ValueError):
2944
          pass
2945
        else:
2946
          val = row[idx] = utils.FormatUnit(val, units)
2947
      val = row[idx] = str(val)
2948
      if separator is None:
2949
        mlens[idx] = max(mlens[idx], len(val))
2950

    
2951
  result = []
2952
  if headers:
2953
    args = []
2954
    for idx, name in enumerate(fields):
2955
      hdr = headers[name]
2956
      if separator is None:
2957
        mlens[idx] = max(mlens[idx], len(hdr))
2958
        args.append(mlens[idx])
2959
      args.append(hdr)
2960
    result.append(format_str % tuple(args))
2961

    
2962
  if separator is None:
2963
    assert len(mlens) == len(fields)
2964

    
2965
    if fields and not numfields.Matches(fields[-1]):
2966
      mlens[-1] = 0
2967

    
2968
  for line in data:
2969
    args = []
2970
    if line is None:
2971
      line = ["-" for _ in fields]
2972
    for idx in range(len(fields)):
2973
      if separator is None:
2974
        args.append(mlens[idx])
2975
      args.append(line[idx])
2976
    result.append(format_str % tuple(args))
2977

    
2978
  return result
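# Illustrative use of GenerateTable (node names and values below are
# hypothetical):
#
#   GenerateTable({"name": "Node", "dfree": "DFree"},
#                 ["name", "dfree"], None,
#                 [["node1.example.com", 102400],
#                  ["node2.example.com", 2048]],
#                 numfields=["dfree"], unitfields=["dfree"])
#
# With separator=None the "smart" algorithm computes column widths, right
# aligns the numeric "dfree" column and, since units defaults to "h" in that
# case, renders it human-readable via utils.FormatUnit.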


def _FormatBool(value):
2982
  """Formats a boolean value as a string.
2983

2984
  """
2985
  if value:
2986
    return "Y"
2987
  return "N"
2988

    
2989

    
2990
#: Default formatting for query results; (callback, align right)
2991
_DEFAULT_FORMAT_QUERY = {
2992
  constants.QFT_TEXT: (str, False),
2993
  constants.QFT_BOOL: (_FormatBool, False),
2994
  constants.QFT_NUMBER: (str, True),
2995
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2996
  constants.QFT_OTHER: (str, False),
2997
  constants.QFT_UNKNOWN: (str, False),
2998
  }
2999

    
3000

    
3001
def _GetColumnFormatter(fdef, override, unit):
3002
  """Returns formatting function for a field.
3003

3004
  @type fdef: L{objects.QueryFieldDefinition}
3005
  @type override: dict
3006
  @param override: Dictionary for overriding field formatting functions,
3007
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3008
  @type unit: string
3009
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3010
  @rtype: tuple; (callable, bool)
3011
  @return: Returns the function to format a value (takes one parameter) and a
3012
    boolean for aligning the value on the right-hand side
3013

3014
  """
3015
  fmt = override.get(fdef.name, None)
3016
  if fmt is not None:
3017
    return fmt
3018

    
3019
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3020

    
3021
  if fdef.kind == constants.QFT_UNIT:
3022
    # Can't keep this information in the static dictionary
3023
    return (lambda value: utils.FormatUnit(value, unit), True)
3024

    
3025
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3026
  if fmt is not None:
3027
    return fmt
3028

    
3029
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3030

    
3031

    
3032
class _QueryColumnFormatter:
3033
  """Callable class for formatting fields of a query.
3034

3035
  """
3036
  def __init__(self, fn, status_fn, verbose):
3037
    """Initializes this class.
3038

3039
    @type fn: callable
3040
    @param fn: Formatting function
3041
    @type status_fn: callable
3042
    @param status_fn: Function to report fields' status
3043
    @type verbose: boolean
3044
    @param verbose: whether to use verbose field descriptions or not
3045

3046
    """
3047
    self._fn = fn
3048
    self._status_fn = status_fn
3049
    self._verbose = verbose
3050

    
3051
  def __call__(self, data):
3052
    """Returns a field's string representation.
3053

3054
    """
3055
    (status, value) = data
3056

    
3057
    # Report status
3058
    self._status_fn(status)
3059

    
3060
    if status == constants.RS_NORMAL:
3061
      return self._fn(value)
3062

    
3063
    assert value is None, \
3064
           "Found value %r for abnormal status %s" % (value, status)
3065

    
3066
    return FormatResultError(status, self._verbose)
3067

    
3068

    
3069
def FormatResultError(status, verbose):
3070
  """Formats result status other than L{constants.RS_NORMAL}.
3071

3072
  @param status: The result status
3073
  @type verbose: boolean
3074
  @param verbose: Whether to return the verbose text
3075
  @return: Text of result status
3076

3077
  """
3078
  assert status != constants.RS_NORMAL, \
3079
         "FormatResultError called with status equal to constants.RS_NORMAL"
3080
  try:
3081
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3082
  except KeyError:
3083
    raise NotImplementedError("Unknown status %s" % status)
3084
  else:
3085
    if verbose:
3086
      return verbose_text
3087
    return normal_text
3088

    
3089

    
3090
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3091
                      header=False, verbose=False):
3092
  """Formats data in L{objects.QueryResponse}.
3093

3094
  @type result: L{objects.QueryResponse}
3095
  @param result: result of query operation
3096
  @type unit: string
3097
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3098
    see L{utils.text.FormatUnit}
3099
  @type format_override: dict
3100
  @param format_override: Dictionary for overriding field formatting functions,
3101
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3102
  @type separator: string or None
3103
  @param separator: String used to separate fields
3104
  @type header: bool
3105
  @param header: Whether to output header row
3106
  @type verbose: boolean
3107
  @param verbose: whether to use verbose field descriptions or not
3108

3109
  """
3110
  if unit is None:
3111
    if separator:
3112
      unit = "m"
3113
    else:
3114
      unit = "h"
3115

    
3116
  if format_override is None:
3117
    format_override = {}
3118

    
3119
  stats = dict.fromkeys(constants.RS_ALL, 0)
3120

    
3121
  def _RecordStatus(status):
3122
    if status in stats:
3123
      stats[status] += 1
3124

    
3125
  columns = []
3126
  for fdef in result.fields:
3127
    assert fdef.title and fdef.name
3128
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3129
    columns.append(TableColumn(fdef.title,
3130
                               _QueryColumnFormatter(fn, _RecordStatus,
3131
                                                     verbose),
3132
                               align_right))
3133

    
3134
  table = FormatTable(result.data, columns, header, separator)
3135

    
3136
  # Collect statistics
3137
  assert len(stats) == len(constants.RS_ALL)
3138
  assert compat.all(count >= 0 for count in stats.values())
3139

    
3140
  # Determine overall status. If there was no data, unknown fields must be
3141
  # detected via the field definitions.
3142
  if (stats[constants.RS_UNKNOWN] or
3143
      (not result.data and _GetUnknownFields(result.fields))):
3144
    status = QR_UNKNOWN
3145
  elif compat.any(count > 0 for key, count in stats.items()
3146
                  if key != constants.RS_NORMAL):
3147
    status = QR_INCOMPLETE
3148
  else:
3149
    status = QR_NORMAL
3150

    
3151
  return (status, table)
3152

    
3153

    
3154
def _GetUnknownFields(fdefs):
3155
  """Returns list of unknown fields included in C{fdefs}.
3156

3157
  @type fdefs: list of L{objects.QueryFieldDefinition}
3158

3159
  """
3160
  return [fdef for fdef in fdefs
3161
          if fdef.kind == constants.QFT_UNKNOWN]
3162

    
3163

    
3164
def _WarnUnknownFields(fdefs):
3165
  """Prints a warning to stderr if a query included unknown fields.
3166

3167
  @type fdefs: list of L{objects.QueryFieldDefinition}
3168

3169
  """
3170
  unknown = _GetUnknownFields(fdefs)
3171
  if unknown:
3172
    ToStderr("Warning: Queried for unknown fields %s",
3173
             utils.CommaJoin(fdef.name for fdef in unknown))
3174
    return True
3175

    
3176
  return False
3177

    
3178

    
3179
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3180
                format_override=None, verbose=False, force_filter=False,
3181
                namefield=None, qfilter=None, isnumeric=False):
3182
  """Generic implementation for listing all items of a resource.
3183

3184
  @param resource: One of L{constants.QR_VIA_LUXI}
3185
  @type fields: list of strings
3186
  @param fields: List of fields to query for
3187
  @type names: list of strings
3188
  @param names: Names of items to query for
3189
  @type unit: string or None
3190
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3191
    None for automatic choice (human-readable for non-separator usage,
3192
    otherwise megabytes); this is a one-letter string
3193
  @type separator: string or None
3194
  @param separator: String used to separate fields
3195
  @type header: bool
3196
  @param header: Whether to show header row
3197
  @type force_filter: bool
3198
  @param force_filter: Whether to always treat names as filter
3199
  @type format_override: dict
3200
  @param format_override: Dictionary for overriding field formatting functions,
3201
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3202
  @type verbose: boolean
3203
  @param verbose: whether to use verbose field descriptions or not
3204
  @type namefield: string
3205
  @param namefield: Name of field to use for simple filters (see
3206
    L{qlang.MakeFilter} for details)
3207
  @type qfilter: list or None
3208
  @param qfilter: Query filter (in addition to names)
3209
  @type isnumeric: bool
3210
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3211
    any simple filters built by namefield should use integer values to
3212
    reflect that
3213

3214
  """
3215
  if not names:
3216
    names = None
3217

    
3218
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3219
                                isnumeric=isnumeric)
3220

    
3221
  if qfilter is None:
3222
    qfilter = namefilter
3223
  elif namefilter is not None:
3224
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3225

    
3226
  if cl is None:
3227
    cl = GetClient()
3228

    
3229
  response = cl.Query(resource, fields, qfilter)
3230

    
3231
  found_unknown = _WarnUnknownFields(response.fields)
3232

    
3233
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3234
                                     header=header,
3235
                                     format_override=format_override,
3236
                                     verbose=verbose)
3237

    
3238
  for line in data:
3239
    ToStdout(line)
3240

    
3241
  assert ((found_unknown and status == QR_UNKNOWN) or
3242
          (not found_unknown and status != QR_UNKNOWN))
3243

    
3244
  if status == QR_UNKNOWN:
3245
    return constants.EXIT_UNKNOWN_FIELD
3246

    
3247
  # TODO: Should the list command fail if not all data could be collected?
3248
  return constants.EXIT_SUCCESS
3249

    
3250

    
3251
def _FieldDescValues(fdef):
3252
  """Helper function for L{GenericListFields} to get query field description.
3253

3254
  @type fdef: L{objects.QueryFieldDefinition}
3255
  @rtype: list
3256

3257
  """
3258
  return [
3259
    fdef.name,
3260
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3261
    fdef.title,
3262
    fdef.doc,
3263
    ]
3264

    
3265

    
3266
def GenericListFields(resource, fields, separator, header, cl=None):
3267
  """Generic implementation for listing fields for a resource.
3268

3269
  @param resource: One of L{constants.QR_VIA_LUXI}
3270
  @type fields: list of strings
3271
  @param fields: List of fields to query for
3272
  @type separator: string or None
3273
  @param separator: String used to separate fields
3274
  @type header: bool
3275
  @param header: Whether to show header row
3276

3277
  """
3278
  if cl is None:
3279
    cl = GetClient()
3280

    
3281
  if not fields:
3282
    fields = None
3283

    
3284
  response = cl.QueryFields(resource, fields)
3285

    
3286
  found_unknown = _WarnUnknownFields(response.fields)
3287

    
3288
  columns = [
3289
    TableColumn("Name", str, False),
3290
    TableColumn("Type", str, False),
3291
    TableColumn("Title", str, False),
3292
    TableColumn("Description", str, False),
3293
    ]
3294

    
3295
  rows = map(_FieldDescValues, response.fields)
3296

    
3297
  for line in FormatTable(rows, columns, header, separator):
3298
    ToStdout(line)
3299

    
3300
  if found_unknown:
3301
    return constants.EXIT_UNKNOWN_FIELD
3302

    
3303
  return constants.EXIT_SUCCESS
3304

    
3305

    
3306
class TableColumn:
3307
  """Describes a column for L{FormatTable}.
3308

3309
  """
3310
  def __init__(self, title, fn, align_right):
3311
    """Initializes this class.
3312

3313
    @type title: string
3314
    @param title: Column title
3315
    @type fn: callable
3316
    @param fn: Formatting function
3317
    @type align_right: bool
3318
    @param align_right: Whether to align values on the right-hand side
3319

3320
    """
3321
    self.title = title
3322
    self.format = fn
3323
    self.align_right = align_right
3324

    
3325

    
3326
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
3339
  """Formats data as a table.
3340

3341
  @type rows: list of lists
3342
  @param rows: Row data, one list per row
3343
  @type columns: list of L{TableColumn}
3344
  @param columns: Column descriptions
3345
  @type header: bool
3346
  @param header: Whether to show header row
3347
  @type separator: string or None
3348
  @param separator: String used to separate columns
3349

3350
  """
3351
  if header:
3352
    data = [[col.title for col in columns]]
3353
    colwidth = [len(col.title) for col in columns]
3354
  else:
3355
    data = []
3356
    colwidth = [0 for _ in columns]
3357

    
3358
  # Format row data
3359
  for row in rows:
3360
    assert len(row) == len(columns)
3361

    
3362
    formatted = [col.format(value) for value, col in zip(row, columns)]
3363

    
3364
    if separator is None:
3365
      # Update column widths
3366
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3367
        # Modifying a list's items while iterating is fine
3368
        colwidth[idx] = max(oldwidth, len(value))
3369

    
3370
    data.append(formatted)
3371

    
3372
  if separator is not None:
3373
    # Return early if a separator is used
3374
    return [separator.join(row) for row in data]
3375

    
3376
  if columns and not columns[-1].align_right:
3377
    # Avoid unnecessary spaces at end of line
3378
    colwidth[-1] = 0
3379

    
3380
  # Build format string
3381
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3382
                  for col, width in zip(columns, colwidth)])
3383

    
3384
  return [fmt % tuple(row) for row in data]
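# Illustrative use of TableColumn/FormatTable (data is hypothetical):
#
#   columns = [TableColumn("Name", str, False),
#              TableColumn("Size", str, True)]
#   FormatTable([["sda", "512"], ["sdb", "1024"]], columns, True, None)
#
# returns a list of lines with a header row, the "Name" column left-aligned
# and the "Size" column right-aligned; passing a separator string instead of
# None simply joins the formatted cells with that separator.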


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
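# For example, FormatTimestamp((1234567890, 0)) returns the date/time string
# produced by utils.FormatTime for that moment, while anything that is not a
# (seconds, microseconds) pair, e.g. FormatTimestamp("unknown"), yields "?".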


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
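# A few illustrative conversions: ParseTimespec("30") and ParseTimespec(30)
# both return 30 (seconds), ParseTimespec("2h") returns 7200 and
# ParseTimespec("1w") returns 604800; a bare suffix such as "h" or an invalid
# value such as "2x" raises OpPrereqError.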


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3450
                   filter_master=False, nodegroup=None):
3451
  """Returns the names of online nodes.
3452

3453
  This function will also log a note on stderr with the names of
  any offline nodes that are skipped.
3455

3456
  @param nodes: if not empty, use only this subset of nodes (minus the
3457
      offline ones)
3458
  @param cl: if not None, luxi client to use
3459
  @type nowarn: boolean
3460
  @param nowarn: by default, this function will output a note with the
3461
      offline nodes that are skipped; if this parameter is True the
3462
      note is not displayed
3463
  @type secondary_ips: boolean
3464
  @param secondary_ips: if True, return the secondary IPs instead of the
3465
      names, useful for doing network traffic over the replication interface
3466
      (if any)
3467
  @type filter_master: boolean
3468
  @param filter_master: if True, do not return the master node in the list
3469
      (useful in coordination with secondary_ips where we cannot check our
3470
      node name against the list)
3471
  @type nodegroup: string
3472
  @param nodegroup: If set, only return nodes in this node group
3473

3474
  """
3475
  if cl is None:
3476
    cl = GetClient(query=True)
3477

    
3478
  qfilter = []
3479

    
3480
  if nodes:
3481
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3482

    
3483
  if nodegroup is not None:
3484
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3485
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3486

    
3487
  if filter_master:
3488
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3489

    
3490
  if qfilter:
3491
    if len(qfilter) > 1:
3492
      final_filter = [qlang.OP_AND] + qfilter
3493
    else:
3494
      assert len(qfilter) == 1
3495
      final_filter = qfilter[0]
3496
  else:
3497
    final_filter = None
3498

    
3499
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3500

    
3501
  def _IsOffline(row):
3502
    (_, (_, offline), _) = row
3503
    return offline
3504

    
3505
  def _GetName(row):
3506
    ((_, name), _, _) = row
3507
    return name
3508

    
3509
  def _GetSip(row):
3510
    (_, _, (_, sip)) = row
3511
    return sip
3512

    
3513
  (offline, online) = compat.partition(result.data, _IsOffline)
3514

    
3515
  if offline and not nowarn:
3516
    ToStderr("Note: skipping offline node(s): %s" %
3517
             utils.CommaJoin(map(_GetName, offline)))
3518

    
3519
  if secondary_ips:
3520
    fn = _GetSip
3521
  else:
3522
    fn = _GetName
3523

    
3524
  return map(fn, online)
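# Typical calls (cl being a luxi query client, names hypothetical):
#   GetOnlineNodes([], cl=cl) returns the names of all online nodes, while
#   GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)
#   returns the secondary IPs of all online nodes except the master.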


def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of integers
  """
  return map(lambda t: t[0],
             cl.QueryNodes(names=nodes,
                           fields=["ndp/ssh_port"],
                           use_locking=False))
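# For example, GetNodesSshPorts(["node1.example.com"], cl) would return a
# single-element list such as [22], taken from the "ndp/ssh_port" field of
# the node query (the node name and port above are hypothetical).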


def _ToStream(stream, txt, *args):
3544
  """Write a message to a stream, bypassing the logging system
3545

3546
  @type stream: file object
3547
  @param stream: the file to which we should write
3548
  @type txt: str
3549
  @param txt: the message
3550

3551
  """
3552
  try:
3553
    if args:
3554
      args = tuple(args)
3555
      stream.write(txt % args)
3556
    else:
3557
      stream.write(txt)
3558
    stream.write("\n")
3559
    stream.flush()
3560
  except IOError, err:
3561
    if err.errno == errno.EPIPE:
3562
      # our terminal went away, we'll exit
3563
      sys.exit(constants.EXIT_FAILURE)
3564
    else:
3565
      raise
3566

    
3567

    
3568
def ToStdout(txt, *args):
3569
  """Write a message to stdout only, bypassing the logging system
3570

3571
  This is just a wrapper over _ToStream.
3572

3573
  @type txt: str
3574
  @param txt: the message
3575

3576
  """
3577
  _ToStream(sys.stdout, txt, *args)
3578

    
3579

    
3580
def ToStderr(txt, *args):
3581
  """Write a message to stderr only, bypassing the logging system
3582

3583
  This is just a wrapper over _ToStream.
3584

3585
  @type txt: str
3586
  @param txt: the message
3587

3588
  """
3589
  _ToStream(sys.stderr, txt, *args)
3590

    
3591

    
3592
class JobExecutor(object):
3593
  """Class which manages the submission and execution of multiple jobs.
3594

3595
  Note that instances of this class should not be reused between
3596
  GetResults() calls.
3597

3598
  """
3599
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3600
    self.queue = []
3601
    if cl is None:
3602
      cl = GetClient()
3603
    self.cl = cl
3604
    self.verbose = verbose
3605
    self.jobs = []
3606
    self.opts = opts
3607
    self.feedback_fn = feedback_fn
3608
    self._counter = itertools.count()
3609

    
3610
  @staticmethod
3611
  def _IfName(name, fmt):
3612
    """Helper function for formatting name.
3613

3614
    """
3615
    if name:
3616
      return fmt % name
3617

    
3618
    return ""
3619

    
3620
  def QueueJob(self, name, *ops):
3621
    """Record a job for later submit.
3622

3623
    @type name: string
3624
    @param name: a description of the job, will be used in WaitJobSet
3625

3626
    """
3627
    SetGenericOpcodeOpts(ops, self.opts)
3628
    self.queue.append((self._counter.next(), name, ops))
3629

    
3630
  def AddJobId(self, name, status, job_id):
3631
    """Adds a job ID to the internal queue.
3632

3633
    """
3634
    self.jobs.append((self._counter.next(), status, job_id, name))
3635

    
3636
  def SubmitPending(self, each=False):
3637
    """Submit all pending jobs.
3638

3639
    """
3640
    if each:
3641
      results = []
3642
      for (_, _, ops) in self.queue:
3643
        # SubmitJob will remove the success status, but raise an exception if
3644
        # the submission fails, so we'll notice that anyway.
3645
        results.append([True, self.cl.SubmitJob(ops)[0]])
3646
    else:
3647
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3648
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3649
      self.jobs.append((idx, status, data, name))
3650

    
3651
  def _ChooseJob(self):
3652
    """Choose a non-waiting/queued job to poll next.
3653

3654
    """
3655
    assert self.jobs, "_ChooseJob called with empty job list"
3656

    
3657
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3658
                               ["status"])
3659
    assert result
3660

    
3661
    for job_data, status in zip(self.jobs, result):
3662
      if (isinstance(status, list) and status and
3663
          status[0] in (constants.JOB_STATUS_QUEUED,
3664
                        constants.JOB_STATUS_WAITING,
3665
                        constants.JOB_STATUS_CANCELING)):
3666
        # job is still present and waiting
3667
        continue
3668
      # good candidate found (either running job or lost job)
3669
      self.jobs.remove(job_data)
3670
      return job_data
3671

    
3672
    # no job found
3673
    return self.jobs.pop(0)
3674

    
3675
  def GetResults(self):
3676
    """Wait for and return the results of all jobs.
3677

3678
    @rtype: list
3679
    @return: list of tuples (success, job results), in the same order
3680
        as the submitted jobs; if a job has failed, instead of the result
3681
        there will be the error message
3682

3683
    """
3684
    if not self.jobs:
3685
      self.SubmitPending()
3686
    results = []
3687
    if self.verbose:
3688
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3689
      if ok_jobs:
3690
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3691

    
3692
    # first, remove any non-submitted jobs
3693
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3694
    for idx, _, jid, name in failures:
3695
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3696
      results.append((idx, False, jid))
3697

    
3698
    while self.jobs:
3699
      (idx, _, jid, name) = self._ChooseJob()
3700
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3701
      try:
3702
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3703
        success = True
3704
      except errors.JobLost, err:
3705
        _, job_result = FormatError(err)
3706
        ToStderr("Job %s%s has been archived, cannot check its result",
3707
                 jid, self._IfName(name, " for %s"))
3708
        success = False
3709
      except (errors.GenericError, luxi.ProtocolError), err:
3710
        _, job_result = FormatError(err)
3711
        success = False
3712
        # the error message will always be shown, verbose or not
3713
        ToStderr("Job %s%s has failed: %s",
3714
                 jid, self._IfName(name, " for %s"), job_result)
3715

    
3716
      results.append((idx, success, job_result))
3717

    
3718
    # sort based on the index, then drop it
3719
    results.sort()
3720
    results = [i[1:] for i in results]
3721

    
3722
    return results
3723

    
3724
  def WaitOrShow(self, wait):
3725
    """Wait for job results or only print the job IDs.
3726

3727
    @type wait: boolean
3728
    @param wait: whether to wait or not
3729

3730
    """
3731
    if wait:
3732
      return self.GetResults()
3733
    else:
3734
      if not self.jobs:
3735
        self.SubmitPending()
3736
      for _, status, result, name in self.jobs:
3737
        if status:
3738
          ToStdout("%s: %s", result, name)
3739
        else:
3740
          ToStderr("Failure for %s: %s", name, result)
3741
      return [row[1:3] for row in self.jobs]
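# Sketch of how JobExecutor is typically driven (the opcodes and names below
# are placeholders):
#
#   je = JobExecutor(cl=cl, opts=opts)
#   je.QueueJob("instance1.example.com", op_startup1)
#   je.QueueJob("instance2.example.com", op_startup2)
#   for (success, result) in je.GetResults():
#     ...
#
# GetResults() submits any still-pending jobs, polls them via _ChooseJob and
# PollJob, and returns (success, result-or-error) pairs in submission order.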


def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
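# For instance (hypothetical parameters):
#   FormatParamsDictInfo({"memory": 512}, {"memory": 512, "vcpus": 2})
# returns {"memory": "512", "vcpus": "default (2)"}, i.e. values not set in
# param_dict are shown as defaults taken from the effective set.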


def _FormatListInfoDefault(data, def_data):
3766
  if data is not None:
3767
    ret = utils.CommaJoin(data)
3768
  else:
3769
    ret = "default (%s)" % utils.CommaJoin(def_data)
3770
  return ret
3771

    
3772

    
3773
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3774
  """Formats an instance policy.
3775

3776
  @type custom_ipolicy: dict
3777
  @param custom_ipolicy: own policy
3778
  @type eff_ipolicy: dict
3779
  @param eff_ipolicy: effective policy (including defaults); ignored for
3780
      cluster
3781
  @type iscluster: bool
3782
  @param iscluster: the policy is at cluster level
3783
  @rtype: list of pairs
3784
  @return: formatted data, suitable for L{PrintGenericInfo}
3785

3786
  """
3787
  if iscluster:
3788
    eff_ipolicy = custom_ipolicy
3789

    
3790
  minmax_out = []
3791
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3792
  if custom_minmax:
3793
    for (k, minmax) in enumerate(custom_minmax):
3794
      minmax_out.append([
3795
        ("%s/%s" % (key, k),
3796
         FormatParamsDictInfo(minmax[key], minmax[key]))
3797
        for key in constants.ISPECS_MINMAX_KEYS
3798
        ])
3799
  else:
3800
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3801
      minmax_out.append([
3802
        ("%s/%s" % (key, k),
3803
         FormatParamsDictInfo({}, minmax[key]))
3804
        for key in constants.ISPECS_MINMAX_KEYS
3805
        ])
3806
  ret = [("bounds specs", minmax_out)]
3807

    
3808
  if iscluster:
3809
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3810
    ret.append(
3811
      (constants.ISPECS_STD,
3812
       FormatParamsDictInfo(stdspecs, stdspecs))
3813
      )
3814

    
3815
  ret.append(
3816
    ("allowed disk templates",
3817
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3818
                            eff_ipolicy[constants.IPOLICY_DTS]))
3819
    )
3820
  ret.extend([
3821
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3822
    for key in constants.IPOLICY_PARAMETERS
3823
    ])
3824
  return ret
3825

    
3826

    
3827
def _PrintSpecsParameters(buf, specs):
3828
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3829
  buf.write(",".join(values))
3830

    
3831

    
3832
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3833
  """Print the command option used to generate the given instance policy.
3834

3835
  Currently only the parts dealing with specs are supported.
3836

3837
  @type buf: StringIO
3838
  @param buf: stream to write into
3839
  @type ipolicy: dict
3840
  @param ipolicy: instance policy
3841
  @type isgroup: bool
3842
  @param isgroup: whether the policy is at group level
3843

3844
  """
3845
  if not isgroup:
3846
    stdspecs = ipolicy.get("std")
3847
    if stdspecs:
3848
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3849
      _PrintSpecsParameters(buf, stdspecs)
3850
  minmaxes = ipolicy.get("minmax", [])
3851
  first = True
3852
  for minmax in minmaxes:
3853
    minspecs = minmax.get("min")
3854
    maxspecs = minmax.get("max")
3855
    if minspecs and maxspecs:
3856
      if first:
3857
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3858
        first = False
3859
      else:
3860
        buf.write("//")
3861
      buf.write("min:")
3862
      _PrintSpecsParameters(buf, minspecs)
3863
      buf.write("/max:")
3864
      _PrintSpecsParameters(buf, maxspecs)
3865

    
3866

    
3867
def ConfirmOperation(names, list_type, text, extra=""):
3868
  """Ask the user to confirm an operation on a list of list_type.
3869

3870
  This function is used to request confirmation for doing an operation
3871
  on a given list of list_type.
3872

3873
  @type names: list
3874
  @param names: the list of names that we display when
3875
      we ask for confirmation
3876
  @type list_type: str
3877
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3878
  @type text: str
3879
  @param text: the operation that the user should confirm
3880
  @rtype: boolean
3881
  @return: True or False depending on user's confirmation.
3882

3883
  """
3884
  count = len(names)
3885
  msg = ("The %s will operate on %d %s.\n%s"
3886
         "Do you want to continue?" % (text, count, list_type, extra))
3887
  affected = (("\nAffected %s:\n" % list_type) +
3888
              "\n".join(["  %s" % name for name in names]))
3889

    
3890
  choices = [("y", True, "Yes, execute the %s" % text),
3891
             ("n", False, "No, abort the %s" % text)]
3892

    
3893
  if count > 20:
3894
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3895
    question = msg
3896
  else:
3897
    question = msg + affected
3898

    
3899
  choice = AskUser(question, choices)
3900
  if choice == "v":
3901
    choices.pop(1)
3902
    choice = AskUser(msg + affected, choices)
3903
  return choice
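# Typical call (names and operation text are hypothetical):
#   if not ConfirmOperation(["inst1", "inst2"], "instances", "shutdown"):
#     return 1
# With more than 20 names the affected list is only shown after the user
# picks the extra "v" (view) choice.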


def _MaybeParseUnit(elements):
  """Parses and returns a dict of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
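# For example, _MaybeParseUnit({"min": "default", "std": "4G"}) keeps the
# "default" marker untouched and converts "4G" through utils.ParseUnit
# (assuming the usual mebibyte base, i.e. 4096).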


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3920
                             ispecs_disk_count, ispecs_disk_size,
3921
                             ispecs_nic_count, group_ipolicy, fill_all):
3922
  try:
3923
    if ispecs_mem_size:
3924
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3925
    if ispecs_disk_size:
3926
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3927
  except (TypeError, ValueError, errors.UnitParseError), err:
3928
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3929
                               " in policy: %s" %
3930
                               (ispecs_disk_size, ispecs_mem_size, err),
3931
                               errors.ECODE_INVAL)
3932

    
3933
  # prepare ipolicy dict
3934
  ispecs_transposed = {
3935
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3936
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3937
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3938
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3939
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3940
    }
3941

    
3942
  # first, check that the values given are correct
3943
  if group_ipolicy:
3944
    forced_type = TISPECS_GROUP_TYPES
3945
  else:
3946
    forced_type = TISPECS_CLUSTER_TYPES
3947
  for specs in ispecs_transposed.values():
3948
    assert type(specs) is dict
3949
    utils.ForceDictType(specs, forced_type)
3950

    
3951
  # then transpose
3952
  ispecs = {
3953
    constants.ISPECS_MIN: {},
3954
    constants.ISPECS_MAX: {},
3955
    constants.ISPECS_STD: {},
3956
    }
3957
  for (name, specs) in ispecs_transposed.iteritems():
3958
    assert name in constants.ISPECS_PARAMETERS
3959
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3960
      assert key in ispecs
3961
      ispecs[key][name] = val
3962
  minmax_out = {}
3963
  for key in constants.ISPECS_MINMAX_KEYS:
3964
    if fill_all:
3965
      minmax_out[key] = \
3966
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3967
    else:
3968
      minmax_out[key] = ispecs[key]
3969
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3970
  if fill_all:
3971
    ipolicy[constants.ISPECS_STD] = \
3972
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3973
                         ispecs[constants.ISPECS_STD])
3974
  else:
3975
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3976

    
3977

    
3978
def _ParseSpecUnit(spec, keyname):
3979
  ret = spec.copy()
3980
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3981
    if k in ret:
3982
      try:
3983
        ret[k] = utils.ParseUnit(ret[k])
3984
      except (TypeError, ValueError, errors.UnitParseError), err:
3985
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3986
                                    " specs: %s" % (k, ret[k], keyname, err)),
3987
                                   errors.ECODE_INVAL)
3988
  return ret
3989

    
3990

    
3991
def _ParseISpec(spec, keyname, required):
3992
  ret = _ParseSpecUnit(spec, keyname)
3993
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3994
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3995
  if required and missing:
3996
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3997
                               (keyname, utils.CommaJoin(missing)),
3998
                               errors.ECODE_INVAL)
3999
  return ret
4000

    
4001

    
4002
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4003
  ret = None
4004
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
4005
      len(minmax_ispecs[0]) == 1):
4006
    for (key, spec) in minmax_ispecs[0].items():
4007
      # This loop is executed exactly once
4008
      if key in allowed_values and not spec:
4009
        ret = key
4010
  return ret
4011

    
4012

    
4013
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4014
                            group_ipolicy, allowed_values):
4015
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4016
  if found_allowed is not None:
4017
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4018
  elif minmax_ispecs is not None:
4019
    minmax_out = []
4020
    for mmpair in minmax_ispecs:
4021
      mmpair_out = {}
4022
      for (key, spec) in mmpair.items():
4023
        if key not in constants.ISPECS_MINMAX_KEYS:
4024
          msg = "Invalid key in bounds instance specifications: %s" % key
4025
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4026
        mmpair_out[key] = _ParseISpec(spec, key, True)
4027
      minmax_out.append(mmpair_out)
4028
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4029
  if std_ispecs is not None:
4030
    assert not group_ipolicy # This is not an option for gnt-group
4031
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4032

    
4033

    
4034
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4035
                          ispecs_cpu_count=None,
4036
                          ispecs_disk_count=None,
4037
                          ispecs_disk_size=None,
4038
                          ispecs_nic_count=None,
4039
                          minmax_ispecs=None,
4040
                          std_ispecs=None,
4041
                          ipolicy_disk_templates=None,
4042
                          ipolicy_vcpu_ratio=None,
4043
                          ipolicy_spindle_ratio=None,
4044
                          group_ipolicy=False,
4045
                          allowed_values=None,
4046
                          fill_all=False):
4047
  """Creation of instance policy based on command line options.
4048

4049
  @param fill_all: whether for cluster policies we should ensure that
4050
    all values are filled
4051

4052
  """
4053
  assert not (fill_all and allowed_values)
4054

    
4055
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4056
                 ispecs_disk_size or ispecs_nic_count)
4057
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4058
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4059
                               " together with any --ipolicy-xxx-specs option",
4060
                               errors.ECODE_INVAL)
4061

    
4062
  ipolicy_out = objects.MakeEmptyIPolicy()
4063
  if split_specs:
4064
    assert fill_all
4065
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4066
                             ispecs_disk_count, ispecs_disk_size,
4067
                             ispecs_nic_count, group_ipolicy, fill_all)
4068
  elif (minmax_ispecs is not None or std_ispecs is not None):
4069
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4070
                            group_ipolicy, allowed_values)
4071

    
4072
  if ipolicy_disk_templates is not None:
4073
    if allowed_values and ipolicy_disk_templates in allowed_values:
4074
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4075
    else:
4076
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4077
  if ipolicy_vcpu_ratio is not None:
4078
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4079
  if ipolicy_spindle_ratio is not None:
4080
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4081

    
4082
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4083

    
4084
  if not group_ipolicy and fill_all:
4085
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4086

    
4087
  return ipolicy_out
4088

    
4089

    
4090
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4091
  """Formatting core of L{PrintGenericInfo}.
4092

4093
  @param buf: (string) stream to accumulate the result into
4094
  @param data: data to format
4095
  @type level: int
4096
  @param level: depth in the data hierarchy, used for indenting
4097
  @type afterkey: bool
4098
  @param afterkey: True when we are in the middle of a line after a key (used
4099
      to properly add newlines or indentation)
4100

4101
  """
4102
  baseind = "  "
4103
  if isinstance(data, dict):
4104
    if not data:
4105
      buf.write("\n")
4106
    else:
4107
      if afterkey:
4108
        buf.write("\n")
4109
        doindent = True
4110
      else:
4111
        doindent = False
4112
      for key in sorted(data):
4113
        if doindent:
4114
          buf.write(baseind * level)
4115
        else:
4116
          doindent = True
4117
        buf.write(key)
4118
        buf.write(": ")
4119
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4120
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4121
    # list of tuples (an ordered dictionary)
4122
    if afterkey:
4123
      buf.write("\n")
4124
      doindent = True
4125
    else:
4126
      doindent = False
4127
    for (key, val) in data:
4128
      if doindent:
4129
        buf.write(baseind * level)
4130
      else:
4131
        doindent = True
4132
      buf.write(key)
4133
      buf.write(": ")
4134
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4135
  elif isinstance(data, list):
4136
    if not data:
4137
      buf.write("\n")
4138
    else:
4139
      if afterkey:
4140
        buf.write("\n")
4141
        doindent = True
4142
      else:
4143
        doindent = False
4144
      for item in data:
4145
        if doindent:
4146
          buf.write(baseind * level)
4147
        else:
4148
          doindent = True
4149
        buf.write("-")
4150
        buf.write(baseind[1:])
4151
        _SerializeGenericInfo(buf, item, level + 1)
4152
  else:
4153
    # This branch should be only taken for strings, but it's practically
4154
    # impossible to guarantee that no other types are produced somewhere
4155
    buf.write(str(data))
4156
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
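# Illustrative call (keys and values are made up): the mixture of nested
# dicts, (key, value) pair lists and plain lists mirrors what the "info"
# commands feed into this function.
#
#   PrintGenericInfo([
#     ("Cluster name", "cluster.example.com"),
#     ("Default parameters", {"memory": "128", "vcpus": "1"}),
#     ("Enabled hypervisors", ["kvm", "xen-pvm"]),
#     ])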