#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
import ganeti.rpc.errors as rpcerr
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from ganeti.runtime import (GetClient)

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_GLUSTER_FILEDIR_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HOTPLUG_IF_POSSIBLE_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_PARAMS_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_NODE_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RQL_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
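
# Illustrative example (not part of the original source): a command that
# takes exactly one instance name followed by any number of node names
# could declare its positional arguments, in the style of the ARGS_*
# shortcuts above, as:
#   [ArgInstance(min=1, max=1), ArgNode()]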


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  the supported tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  the supported tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  the supported tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """Custom optparse converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
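
# Illustrative note (not in the original source): assuming ParseUnit's
# documented behaviour of returning mebibytes, a value such as "512" stays
# 512 while "1G" becomes 1024.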


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Elements
  without a value are converted specially: keys with the prefix 'no_' get
  the value False and the prefix stripped, keys with the prefix "-" get
  the value None and the prefix stripped, and all other keys get the
  value True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
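
# Illustrative example (not in the original source), with parse_prefixes=True:
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-serial_console", True)
# returns a dict equal to
#   {"memory": "512", "auto_balance": False, "serial_console": None}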


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
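
# Illustrative example (not in the original source), with parse_prefixes=True:
#   _SplitIdentKeyVal("-D", "drbd:metavg=xenvg,resync-rate=100", True)
# returns ("drbd", {"metavg": "xenvg", "resync-rate": "100"}), while a bare
# "no_drbd" returns ("drbd", False), i.e. "remove this parameter group".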


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionaries
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
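
# Illustrative example (not in the original source): a value such as
#   "disk:size=1024,vg=xenvg/nic:mode=bridged//disk:size=2048"
# is parsed into
#   [{"disk": {"size": "1024", "vg": "xenvg"}, "nic": {"mode": "bridged"}},
#    {"disk": {"size": "2048"}}]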


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used to request dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
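
# Illustrative sketch (not part of the original source): the custom types
# registered in CliOption.TYPE_CHECKER are what make definitions such as the
# following possible; the option name and dest here are hypothetical.
#   EXAMPLE_PARAMS_OPT = cli_option("--example-parameters",
#                                   dest="example_params", type="keyval",
#                                   default={},
#                                   help="key=value pairs, parsed by"
#                                   " check_key_val")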


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma-separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job ID as the first"
                                   " line on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
                                           dest="default_iallocator_params",
                                           help="iallocator template"
                                           " parameters, in the format"
                                           " template:option=value,"
                                           " option=value,...",
                                           type="keyval",
                                           default={})

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances-to-"
                                         "spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
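
# Illustrative example (not in the original source): an "identkeyval" value
# such as "xen-pvm:kernel_path=/boot/vmlinuz" is parsed by
# check_ident_key_val into ("xen-pvm", {"kernel_path": "/boot/vmlinuz"});
# with action="append", repeating -H collects one such tuple per use.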

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible, fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during"
                         " the migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
                     type="int", help="Set the maximal number of jobs to "
                                      "run simultaneously")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance MAC"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
  "--gluster-storage-dir",
  dest="gluster_storage_dir",
  help="Specify the default directory (cluster-wide) for mounting Gluster"
  " file systems [%s]" %
  pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
  metavar="GLUSTERDIR",
  default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          default=constants.IEC_NONE,
                          help="The compression mode to use",
                          choices=list(constants.IEC_ALL))

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

NEW_NODE_CERT_OPT = cli_option(
  "--new-node-certificates", dest="new_node_cert", default=False,
  action="store_true", help="Generate new node certificates (for all nodes)")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1446
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1447
                                           dest="new_cluster_domain_secret",
1448
                                           default=False, action="store_true",
1449
                                           help=("Create a new cluster domain"
1450
                                                 " secret"))
1451

    
1452
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1453
                              dest="use_replication_network",
1454
                              help="Whether to use the replication network"
1455
                              " for talking to the nodes",
1456
                              action="store_true", default=False)
1457

    
1458
MAINTAIN_NODE_HEALTH_OPT = \
1459
    cli_option("--maintain-node-health", dest="maintain_node_health",
1460
               metavar=_YORNO, default=None, type="bool",
1461
               help="Configure the cluster to automatically maintain node"
1462
               " health, by shutting down unknown instances, shutting down"
1463
               " unknown DRBD devices, etc.")
1464

    
1465
IDENTIFY_DEFAULTS_OPT = \
1466
    cli_option("--identify-defaults", dest="identify_defaults",
1467
               default=False, action="store_true",
1468
               help="Identify which saved instance parameters are equal to"
1469
               " the current cluster defaults and set them as such, instead"
1470
               " of marking them as overridden")
1471

    
1472
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1473
                         action="store", dest="uid_pool",
1474
                         help=("A list of user-ids or user-id"
1475
                               " ranges separated by commas"))
1476

    
1477
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1478
                          action="store", dest="add_uids",
1479
                          help=("A list of user-ids or user-id"
1480
                                " ranges separated by commas, to be"
1481
                                " added to the user-id pool"))
1482

    
1483
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1484
                             action="store", dest="remove_uids",
1485
                             help=("A list of user-ids or user-id"
1486
                                   " ranges separated by commas, to be"
1487
                                   " removed from the user-id pool"))
1488

    
1489
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1490
                              action="store", dest="reserved_lvs",
1491
                              help=("A comma-separated list of reserved"
1492
                                    " logical volumes names, that will be"
1493
                                    " ignored by cluster verify"))
1494

    
1495
ROMAN_OPT = cli_option("--roman",
1496
                       dest="roman_integers", default=False,
1497
                       action="store_true",
1498
                       help="Use roman numbers for positive integers")
1499

    
1500
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1501
                             action="store", default=None,
1502
                             help="Specifies usermode helper for DRBD")
1503

    
1504
PRIMARY_IP_VERSION_OPT = \
1505
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1506
               action="store", dest="primary_ip_version",
1507
               metavar="%d|%d" % (constants.IP4_VERSION,
1508
                                  constants.IP6_VERSION),
1509
               help="Cluster-wide IP version for primary IP")
1510

    
1511
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1512
                              action="store_true",
1513
                              help="Show machine name for every line in output")
1514

    
1515
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1516
                              action="store_true",
1517
                              help=("Hide successful results and show failures"
1518
                                    " only (determined by the exit code)"))
1519

    
1520
REASON_OPT = cli_option("--reason", default=None,
1521
                        help="The reason for executing the command")
1522

    
1523

    
1524
def _PriorityOptionCb(option, _, value, parser):
1525
  """Callback for processing C{--priority} option.
1526

1527
  """
1528
  value = _PRIONAME_TO_VALUE[value]
1529

    
1530
  setattr(parser.values, option.dest, value)
1531

    
1532

    
1533
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1534
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1535
                          choices=_PRIONAME_TO_VALUE.keys(),
1536
                          action="callback", type="choice",
1537
                          callback=_PriorityOptionCb,
1538
                          help="Priority for opcode processing")
1539

    
1540
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1541
                        type="bool", default=None, metavar=_YORNO,
1542
                        help="Sets the hidden flag on the OS")
1543

    
1544
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1545
                        type="bool", default=None, metavar=_YORNO,
1546
                        help="Sets the blacklisted flag on the OS")
1547

    
1548
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1549
                                     type="bool", metavar=_YORNO,
1550
                                     dest="prealloc_wipe_disks",
1551
                                     help=("Wipe disks prior to instance"
1552
                                           " creation"))
1553

    
1554
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1555
                             type="keyval", default=None,
1556
                             help="Node parameters")
1557

    
1558
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1559
                              action="store", metavar="POLICY", default=None,
1560
                              help="Allocation policy for the node group")
1561

    
1562
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1563
                              type="bool", metavar=_YORNO,
1564
                              dest="node_powered",
1565
                              help="Specify if the SoR for node is powered")
1566

    
1567
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1568
                             default=constants.OOB_TIMEOUT,
1569
                             help="Maximum time to wait for out-of-band helper")
1570

    
1571
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1572
                             default=constants.OOB_POWER_DELAY,
1573
                             help="Time in seconds to wait between power-ons")
1574

    
1575
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1576
                              action="store_true", default=False,
1577
                              help=("Whether command argument should be treated"
1578
                                    " as filter"))
1579

    
1580
NO_REMEMBER_OPT = cli_option("--no-remember",
1581
                             dest="no_remember",
1582
                             action="store_true", default=False,
1583
                             help="Perform but do not record the change"
1584
                             " in the configuration")
1585

    
1586
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1587
                              default=False, action="store_true",
1588
                              help="Evacuate primary instances only")
1589

    
1590
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1591
                                default=False, action="store_true",
1592
                                help="Evacuate secondary instances only"
1593
                                     " (applies only to internally mirrored"
1594
                                     " disk templates, e.g. %s)" %
1595
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1596

    
1597
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1598
                                action="store_true", default=False,
1599
                                help="Pause instance at startup")
1600

    
1601
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1602
                          help="Destination node group (name or uuid)",
1603
                          default=None, action="append",
1604
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1605

    
1606
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1607
                               action="append", dest="ignore_errors",
1608
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1609
                               help="Error code to be ignored")
1610

    
1611
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1612
                            action="append",
1613
                            help=("Specify disk state information in the"
1614
                                  " format"
1615
                                  " storage_type/identifier:option=value,...;"
1616
                                  " note this is unused for now"),
1617
                            type="identkeyval")
1618

    
1619
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1620
                          action="append",
1621
                          help=("Specify hypervisor state information in the"
1622
                                " format hypervisor:option=value,...;"
1623
                                " note this is unused for now"),
1624
                          type="identkeyval")
1625

    
1626
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1627
                                action="store_true", default=False,
1628
                                help="Ignore instance policy violations")
1629

    
1630
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1631
                             help="Sets the instance's runtime memory,"
1632
                             " ballooning it up or down to the new value",
1633
                             default=None, type="unit", metavar="<size>")
1634

    
1635
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1636
                          action="store_true", default=False,
1637
                          help="Marks the grow as absolute instead of the"
1638
                          " (default) relative mode")
1639

    
1640
NETWORK_OPT = cli_option("--network",
1641
                         action="store", default=None, dest="network",
1642
                         help="IP network in CIDR notation")
1643

    
1644
GATEWAY_OPT = cli_option("--gateway",
1645
                         action="store", default=None, dest="gateway",
1646
                         help="IP address of the router (gateway)")
1647

    
1648
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1649
                                  action="store", default=None,
1650
                                  dest="add_reserved_ips",
1651
                                  help="Comma-separated list of"
1652
                                  " reserved IPs to add")
1653

    
1654
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1655
                                     action="store", default=None,
1656
                                     dest="remove_reserved_ips",
1657
                                     help="Comma-delimited list of"
1658
                                     " reserved IPs to remove")
1659

    
1660
NETWORK6_OPT = cli_option("--network6",
1661
                          action="store", default=None, dest="network6",
1662
                          help="IP network in CIDR notation")
1663

    
1664
GATEWAY6_OPT = cli_option("--gateway6",
1665
                          action="store", default=None, dest="gateway6",
1666
                          help="IP6 address of the router (gateway)")
1667

    
1668
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1669
                                  dest="conflicts_check",
1670
                                  default=True,
1671
                                  action="store_false",
1672
                                  help="Don't check for conflicting IPs")
1673

    
1674
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1675
                                 default=False, action="store_true",
1676
                                 help="Include default values")
1677

    
1678
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1679
                         action="store_true", default=False,
1680
                         help="Hotplug supported devices (NICs and Disks)")
1681

    
1682
HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
1683
                                     dest="hotplug_if_possible",
1684
                                     action="store_true", default=False,
1685
                                     help="Hotplug devices in case"
1686
                                          " hotplug is supported")
1687

    
1688
#: Options provided by all commands
1689
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1690

    
1691
# options related to asynchronous job handling
1692

    
1693
SUBMIT_OPTS = [
1694
  SUBMIT_OPT,
1695
  PRINT_JOBID_OPT,
1696
  ]
1697

    
1698
# common options for creating instances. add and import then add their own
1699
# specific ones.
1700
COMMON_CREATE_OPTS = [
1701
  BACKEND_OPT,
1702
  DISK_OPT,
1703
  DISK_TEMPLATE_OPT,
1704
  FILESTORE_DIR_OPT,
1705
  FILESTORE_DRIVER_OPT,
1706
  HYPERVISOR_OPT,
1707
  IALLOCATOR_OPT,
1708
  NET_OPT,
1709
  NODE_PLACEMENT_OPT,
1710
  NOIPCHECK_OPT,
1711
  NOCONFLICTSCHECK_OPT,
1712
  NONAMECHECK_OPT,
1713
  NONICS_OPT,
1714
  NWSYNC_OPT,
1715
  OSPARAMS_OPT,
1716
  OS_SIZE_OPT,
1717
  SUBMIT_OPT,
1718
  PRINT_JOBID_OPT,
1719
  TAG_ADD_OPT,
1720
  DRY_RUN_OPT,
1721
  PRIORITY_OPT,
1722
  ]
1723

    
1724
# common instance policy options
1725
INSTANCE_POLICY_OPTS = [
1726
  IPOLICY_BOUNDS_SPECS_OPT,
1727
  IPOLICY_DISK_TEMPLATES,
1728
  IPOLICY_VCPU_RATIO,
1729
  IPOLICY_SPINDLE_RATIO,
1730
  ]
1731

    
1732
# instance policy split specs options
1733
SPLIT_ISPECS_OPTS = [
1734
  SPECS_CPU_COUNT_OPT,
1735
  SPECS_DISK_COUNT_OPT,
1736
  SPECS_DISK_SIZE_OPT,
1737
  SPECS_MEM_SIZE_OPT,
1738
  SPECS_NIC_COUNT_OPT,
1739
  ]
1740

    
1741

    
1742
class _ShowUsage(Exception):
1743
  """Exception class for L{_ParseArgs}.
1744

1745
  """
1746
  def __init__(self, exit_error):
1747
    """Initializes instances of this class.
1748

1749
    @type exit_error: bool
1750
    @param exit_error: Whether to report failure on exit
1751

1752
    """
1753
    Exception.__init__(self)
1754
    self.exit_error = exit_error
1755

    
1756

    
1757
class _ShowVersion(Exception):
1758
  """Exception class for L{_ParseArgs}.
1759

1760
  """
1761

    
1762

    
1763
def _ParseArgs(binary, argv, commands, aliases, env_override):
1764
  """Parser for the command line arguments.
1765

1766
  This function parses the arguments and returns the function which
1767
  must be executed together with its (modified) arguments.
1768

1769
  @param binary: Script name
1770
  @param argv: Command line arguments
1771
  @param commands: Dictionary containing command definitions
1772
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1773
  @param env_override: list of env variables allowed for default args
1774
  @raise _ShowUsage: If usage description should be shown
1775
  @raise _ShowVersion: If version should be shown
1776

1777
  """
1778
  assert not (env_override - set(commands))
1779
  assert not (set(aliases.keys()) & set(commands.keys()))
1780

    
1781
  if len(argv) > 1:
1782
    cmd = argv[1]
1783
  else:
1784
    # No option or command given
1785
    raise _ShowUsage(exit_error=True)
1786

    
1787
  if cmd == "--version":
1788
    raise _ShowVersion()
1789
  elif cmd == "--help":
1790
    raise _ShowUsage(exit_error=False)
1791
  elif not (cmd in commands or cmd in aliases):
1792
    raise _ShowUsage(exit_error=True)
1793

    
1794
  # get command, unalias it, and look it up in commands
1795
  if cmd in aliases:
1796
    if aliases[cmd] not in commands:
1797
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1798
                                   " command '%s'" % (cmd, aliases[cmd]))
1799

    
1800
    cmd = aliases[cmd]
1801

    
1802
  if cmd in env_override:
1803
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1804
    env_args = os.environ.get(args_env_name)
1805
    if env_args:
1806
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1807

    
1808
  func, args_def, parser_opts, usage, description = commands[cmd]
1809
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1810
                        description=description,
1811
                        formatter=TitledHelpFormatter(),
1812
                        usage="%%prog %s %s" % (cmd, usage))
1813
  parser.disable_interspersed_args()
1814
  options, args = parser.parse_args(args=argv[2:])
1815

    
1816
  if not _CheckArguments(cmd, args_def, args):
1817
    return None, None, None
1818

    
1819
  return func, options, args
1820

    
1821

    
1822
def _FormatUsage(binary, commands):
1823
  """Generates a nice description of all commands.
1824

1825
  @param binary: Script name
1826
  @param commands: Dictionary containing command definitions
1827

1828
  """
1829
  # compute the max line length for cmd + usage
1830
  mlen = min(60, max(map(len, commands)))
1831

    
1832
  yield "Usage: %s {command} [options...] [argument...]" % binary
1833
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1834
  yield ""
1835
  yield "Commands:"
1836

    
1837
  # and format a nice command list
1838
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1839
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1840
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1841
    for line in help_lines:
1842
      yield " %-*s   %s" % (mlen, "", line)
1843

    
1844
  yield ""
1845

    
1846

    
1847
def _CheckArguments(cmd, args_def, args):
1848
  """Verifies the arguments using the argument definition.
1849

1850
  Algorithm:
1851

1852
    1. Abort with error if values specified by user but none expected.
1853

1854
    1. For each argument in definition
1855

1856
      1. Keep running count of minimum number of values (min_count)
1857
      1. Keep running count of maximum number of values (max_count)
1858
      1. If it has an unlimited number of values
1859

1860
        1. Abort with error if it's not the last argument in the definition
1861

1862
    1. If last argument has limited number of values
1863

1864
      1. Abort with error if number of values doesn't match or is too large
1865

1866
    1. Abort with error if user didn't pass enough values (min_count)
1867

1868
  """
1869
  if args and not args_def:
1870
    ToStderr("Error: Command %s expects no arguments", cmd)
1871
    return False
1872

    
1873
  min_count = None
1874
  max_count = None
1875
  check_max = None
1876

    
1877
  last_idx = len(args_def) - 1
1878

    
1879
  for idx, arg in enumerate(args_def):
1880
    if min_count is None:
1881
      min_count = arg.min
1882
    elif arg.min is not None:
1883
      min_count += arg.min
1884

    
1885
    if max_count is None:
1886
      max_count = arg.max
1887
    elif arg.max is not None:
1888
      max_count += arg.max
1889

    
1890
    if idx == last_idx:
1891
      check_max = (arg.max is not None)
1892

    
1893
    elif arg.max is None:
1894
      raise errors.ProgrammerError("Only the last argument can have max=None")
1895

    
1896
  if check_max:
1897
    # Command with exact number of arguments
1898
    if (min_count is not None and max_count is not None and
1899
        min_count == max_count and len(args) != min_count):
1900
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1901
      return False
1902

    
1903
    # Command with limited number of arguments
1904
    if max_count is not None and len(args) > max_count:
1905
      ToStderr("Error: Command %s expects only %d argument(s)",
1906
               cmd, max_count)
1907
      return False
1908

    
1909
  # Command with some required arguments
1910
  if min_count is not None and len(args) < min_count:
1911
    ToStderr("Error: Command %s expects at least %d argument(s)",
1912
             cmd, min_count)
1913
    return False
1914

    
1915
  return True
1916

    
1917

    
1918
def SplitNodeOption(value):
1919
  """Splits the value of a --node option.
1920

1921
  """
1922
  if value and ":" in value:
1923
    return value.split(":", 1)
1924
  else:
1925
    return (value, None)
1926

    
1927

    
1928
def CalculateOSNames(os_name, os_variants):
1929
  """Calculates all the names an OS can be called, according to its variants.
1930

1931
  @type os_name: string
1932
  @param os_name: base name of the os
1933
  @type os_variants: list or None
1934
  @param os_variants: list of supported variants
1935
  @rtype: list
1936
  @return: list of valid names
1937

1938
  """
1939
  if os_variants:
1940
    return ["%s+%s" % (os_name, v) for v in os_variants]
1941
  else:
1942
    return [os_name]
1943

    
1944

    
1945
def ParseFields(selected, default):
1946
  """Parses the values of "--field"-like options.
1947

1948
  @type selected: string or None
1949
  @param selected: User-selected options
1950
  @type default: list
1951
  @param default: Default fields
1952

1953
  """
1954
  if selected is None:
1955
    return default
1956

    
1957
  if selected.startswith("+"):
1958
    return default + selected[1:].split(",")
1959

    
1960
  return selected.split(",")
1961

    
1962

    
1963
UsesRPC = rpc.RunWithRPC
1964

    
1965

    
1966
def AskUser(text, choices=None):
1967
  """Ask the user a question.
1968

1969
  @param text: the question to ask
1970

1971
  @param choices: list with elements tuples (input_char, return_value,
1972
      description); if not given, it will default to: [('y', True,
1973
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1974
      note that the '?' char is reserved for help
1975

1976
  @return: one of the return values from the choices list; if input is
1977
      not possible (i.e. not running with a tty, we return the last
1978
      entry from the list
1979

1980
  """
1981
  if choices is None:
1982
    choices = [("y", True, "Perform the operation"),
1983
               ("n", False, "Do not perform the operation")]
1984
  if not choices or not isinstance(choices, list):
1985
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1986
  for entry in choices:
1987
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1988
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1989

    
1990
  answer = choices[-1][1]
1991
  new_text = []
1992
  for line in text.splitlines():
1993
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1994
  text = "\n".join(new_text)
1995
  try:
1996
    f = file("/dev/tty", "a+")
1997
  except IOError:
1998
    return answer
1999
  try:
2000
    chars = [entry[0] for entry in choices]
2001
    chars[-1] = "[%s]" % chars[-1]
2002
    chars.append("?")
2003
    maps = dict([(entry[0], entry[1]) for entry in choices])
2004
    while True:
2005
      f.write(text)
2006
      f.write("\n")
2007
      f.write("/".join(chars))
2008
      f.write(": ")
2009
      line = f.readline(2).strip().lower()
2010
      if line in maps:
2011
        answer = maps[line]
2012
        break
2013
      elif line == "?":
2014
        for entry in choices:
2015
          f.write(" %s - %s\n" % (entry[0], entry[2]))
2016
        f.write("\n")
2017
        continue
2018
  finally:
2019
    f.close()
2020
  return answer
2021

    
2022

    
2023
class JobSubmittedException(Exception):
2024
  """Job was submitted, client should exit.
2025

2026
  This exception has one argument, the ID of the job that was
2027
  submitted. The handler should print this ID.
2028

2029
  This is not an error, just a structured way to exit from clients.
2030

2031
  """
2032

    
2033

    
2034
def SendJob(ops, cl=None):
2035
  """Function to submit an opcode without waiting for the results.
2036

2037
  @type ops: list
2038
  @param ops: list of opcodes
2039
  @type cl: luxi.Client
2040
  @param cl: the luxi client to use for communicating with the master;
2041
             if None, a new client will be created
2042

2043
  """
2044
  if cl is None:
2045
    cl = GetClient()
2046

    
2047
  job_id = cl.SubmitJob(ops)
2048

    
2049
  return job_id
2050

    
2051

    
2052
def GenericPollJob(job_id, cbs, report_cbs):
2053
  """Generic job-polling function.
2054

2055
  @type job_id: number
2056
  @param job_id: Job ID
2057
  @type cbs: Instance of L{JobPollCbBase}
2058
  @param cbs: Data callbacks
2059
  @type report_cbs: Instance of L{JobPollReportCbBase}
2060
  @param report_cbs: Reporting callbacks
2061

2062
  """
2063
  prev_job_info = None
2064
  prev_logmsg_serial = None
2065

    
2066
  status = None
2067

    
2068
  while True:
2069
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2070
                                      prev_logmsg_serial)
2071
    if not result:
2072
      # job not found, go away!
2073
      raise errors.JobLost("Job with id %s lost" % job_id)
2074

    
2075
    if result == constants.JOB_NOTCHANGED:
2076
      report_cbs.ReportNotChanged(job_id, status)
2077

    
2078
      # Wait again
2079
      continue
2080

    
2081
    # Split result, a tuple of (field values, log entries)
2082
    (job_info, log_entries) = result
2083
    (status, ) = job_info
2084

    
2085
    if log_entries:
2086
      for log_entry in log_entries:
2087
        (serial, timestamp, log_type, message) = log_entry
2088
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2089
                                    log_type, message)
2090
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2091

    
2092
    # TODO: Handle canceled and archived jobs
2093
    elif status in (constants.JOB_STATUS_SUCCESS,
2094
                    constants.JOB_STATUS_ERROR,
2095
                    constants.JOB_STATUS_CANCELING,
2096
                    constants.JOB_STATUS_CANCELED):
2097
      break
2098

    
2099
    prev_job_info = job_info
2100

    
2101
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2102
  if not jobs:
2103
    raise errors.JobLost("Job with id %s lost" % job_id)
2104

    
2105
  status, opstatus, result = jobs[0]
2106

    
2107
  if status == constants.JOB_STATUS_SUCCESS:
2108
    return result
2109

    
2110
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2111
    raise errors.OpExecError("Job was canceled")
2112

    
2113
  has_ok = False
2114
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2115
    if status == constants.OP_STATUS_SUCCESS:
2116
      has_ok = True
2117
    elif status == constants.OP_STATUS_ERROR:
2118
      errors.MaybeRaise(msg)
2119

    
2120
      if has_ok:
2121
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2122
                                 (idx, msg))
2123

    
2124
      raise errors.OpExecError(str(msg))
2125

    
2126
  # default failure mode
2127
  raise errors.OpExecError(result)
2128

    
2129

    
2130
class JobPollCbBase:
2131
  """Base class for L{GenericPollJob} callbacks.
2132

2133
  """
2134
  def __init__(self):
2135
    """Initializes this class.
2136

2137
    """
2138

    
2139
  def WaitForJobChangeOnce(self, job_id, fields,
2140
                           prev_job_info, prev_log_serial):
2141
    """Waits for changes on a job.
2142

2143
    """
2144
    raise NotImplementedError()
2145

    
2146
  def QueryJobs(self, job_ids, fields):
2147
    """Returns the selected fields for the selected job IDs.
2148

2149
    @type job_ids: list of numbers
2150
    @param job_ids: Job IDs
2151
    @type fields: list of strings
2152
    @param fields: Fields
2153

2154
    """
2155
    raise NotImplementedError()
2156

    
2157

    
2158
class JobPollReportCbBase:
2159
  """Base class for L{GenericPollJob} reporting callbacks.
2160

2161
  """
2162
  def __init__(self):
2163
    """Initializes this class.
2164

2165
    """
2166

    
2167
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2168
    """Handles a log message.
2169

2170
    """
2171
    raise NotImplementedError()
2172

    
2173
  def ReportNotChanged(self, job_id, status):
2174
    """Called for if a job hasn't changed in a while.
2175

2176
    @type job_id: number
2177
    @param job_id: Job ID
2178
    @type status: string or None
2179
    @param status: Job status if available
2180

2181
    """
2182
    raise NotImplementedError()
2183

    
2184

    
2185
class _LuxiJobPollCb(JobPollCbBase):
2186
  def __init__(self, cl):
2187
    """Initializes this class.
2188

2189
    """
2190
    JobPollCbBase.__init__(self)
2191
    self.cl = cl
2192

    
2193
  def WaitForJobChangeOnce(self, job_id, fields,
2194
                           prev_job_info, prev_log_serial):
2195
    """Waits for changes on a job.
2196

2197
    """
2198
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2199
                                        prev_job_info, prev_log_serial)
2200

    
2201
  def QueryJobs(self, job_ids, fields):
2202
    """Returns the selected fields for the selected job IDs.
2203

2204
    """
2205
    return self.cl.QueryJobs(job_ids, fields)
2206

    
2207

    
2208
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2209
  def __init__(self, feedback_fn):
2210
    """Initializes this class.
2211

2212
    """
2213
    JobPollReportCbBase.__init__(self)
2214

    
2215
    self.feedback_fn = feedback_fn
2216

    
2217
    assert callable(feedback_fn)
2218

    
2219
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2220
    """Handles a log message.
2221

2222
    """
2223
    self.feedback_fn((timestamp, log_type, log_msg))
2224

    
2225
  def ReportNotChanged(self, job_id, status):
2226
    """Called if a job hasn't changed in a while.
2227

2228
    """
2229
    # Ignore
2230

    
2231

    
2232
class StdioJobPollReportCb(JobPollReportCbBase):
2233
  def __init__(self):
2234
    """Initializes this class.
2235

2236
    """
2237
    JobPollReportCbBase.__init__(self)
2238

    
2239
    self.notified_queued = False
2240
    self.notified_waitlock = False
2241

    
2242
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2243
    """Handles a log message.
2244

2245
    """
2246
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2247
             FormatLogMessage(log_type, log_msg))
2248

    
2249
  def ReportNotChanged(self, job_id, status):
2250
    """Called if a job hasn't changed in a while.
2251

2252
    """
2253
    if status is None:
2254
      return
2255

    
2256
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2257
      ToStderr("Job %s is waiting in queue", job_id)
2258
      self.notified_queued = True
2259

    
2260
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2261
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2262
      self.notified_waitlock = True
2263

    
2264

    
2265
def FormatLogMessage(log_type, log_msg):
2266
  """Formats a job message according to its type.
2267

2268
  """
2269
  if log_type != constants.ELOG_MESSAGE:
2270
    log_msg = str(log_msg)
2271

    
2272
  return utils.SafeEncode(log_msg)
2273

    
2274

    
2275
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2276
  """Function to poll for the result of a job.
2277

2278
  @type job_id: job identified
2279
  @param job_id: the job to poll for results
2280
  @type cl: luxi.Client
2281
  @param cl: the luxi client to use for communicating with the master;
2282
             if None, a new client will be created
2283

2284
  """
2285
  if cl is None:
2286
    cl = GetClient()
2287

    
2288
  if reporter is None:
2289
    if feedback_fn:
2290
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2291
    else:
2292
      reporter = StdioJobPollReportCb()
2293
  elif feedback_fn:
2294
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2295

    
2296
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2297

    
2298

    
2299
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2300
  """Legacy function to submit an opcode.
2301

2302
  This is just a simple wrapper over the construction of the processor
2303
  instance. It should be extended to better handle feedback and
2304
  interaction functions.
2305

2306
  """
2307
  if cl is None:
2308
    cl = GetClient()
2309

    
2310
  SetGenericOpcodeOpts([op], opts)
2311

    
2312
  job_id = SendJob([op], cl=cl)
2313
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2314
    ToStdout("%d" % job_id)
2315

    
2316
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2317
                       reporter=reporter)
2318

    
2319
  return op_results[0]
2320

    
2321

    
2322
def SubmitOpCodeToDrainedQueue(op):
2323
  """Forcefully insert a job in the queue, even if it is drained.
2324

2325
  """
2326
  cl = GetClient()
2327
  job_id = cl.SubmitJobToDrainedQueue([op])
2328
  op_results = PollJob(job_id, cl=cl)
2329
  return op_results[0]
2330

    
2331

    
2332
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2333
  """Wrapper around SubmitOpCode or SendJob.
2334

2335
  This function will decide, based on the 'opts' parameter, whether to
2336
  submit and wait for the result of the opcode (and return it), or
2337
  whether to just send the job and print its identifier. It is used in
2338
  order to simplify the implementation of the '--submit' option.
2339

2340
  It will also process the opcodes if we're sending the via SendJob
2341
  (otherwise SubmitOpCode does it).
2342

2343
  """
2344
  if opts and opts.submit_only:
2345
    job = [op]
2346
    SetGenericOpcodeOpts(job, opts)
2347
    job_id = SendJob(job, cl=cl)
2348
    if opts.print_jobid:
2349
      ToStdout("%d" % job_id)
2350
    raise JobSubmittedException(job_id)
2351
  else:
2352
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2353

    
2354

    
2355
def _InitReasonTrail(op, opts):
2356
  """Builds the first part of the reason trail
2357

2358
  Builds the initial part of the reason trail, adding the user provided reason
2359
  (if it exists) and the name of the command starting the operation.
2360

2361
  @param op: the opcode the reason trail will be added to
2362
  @param opts: the command line options selected by the user
2363

2364
  """
2365
  assert len(sys.argv) >= 2
2366
  trail = []
2367

    
2368
  if opts.reason:
2369
    trail.append((constants.OPCODE_REASON_SRC_USER,
2370
                  opts.reason,
2371
                  utils.EpochNano()))
2372

    
2373
  binary = os.path.basename(sys.argv[0])
2374
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2375
  command = sys.argv[1]
2376
  trail.append((source, command, utils.EpochNano()))
2377
  op.reason = trail
2378

    
2379

    
2380
def SetGenericOpcodeOpts(opcode_list, options):
2381
  """Processor for generic options.
2382

2383
  This function updates the given opcodes based on generic command
2384
  line options (like debug, dry-run, etc.).
2385

2386
  @param opcode_list: list of opcodes
2387
  @param options: command line options or None
2388
  @return: None (in-place modification)
2389

2390
  """
2391
  if not options:
2392
    return
2393
  for op in opcode_list:
2394
    op.debug_level = options.debug
2395
    if hasattr(options, "dry_run"):
2396
      op.dry_run = options.dry_run
2397
    if getattr(options, "priority", None) is not None:
2398
      op.priority = options.priority
2399
    _InitReasonTrail(op, options)
2400

    
2401

    
2402
def FormatError(err):
2403
  """Return a formatted error message for a given error.
2404

2405
  This function takes an exception instance and returns a tuple
2406
  consisting of two values: first, the recommended exit code, and
2407
  second, a string describing the error message (not
2408
  newline-terminated).
2409

2410
  """
2411
  retcode = 1
2412
  obuf = StringIO()
2413
  msg = str(err)
2414
  if isinstance(err, errors.ConfigurationError):
2415
    txt = "Corrupt configuration file: %s" % msg
2416
    logging.error(txt)
2417
    obuf.write(txt + "\n")
2418
    obuf.write("Aborting.")
2419
    retcode = 2
2420
  elif isinstance(err, errors.HooksAbort):
2421
    obuf.write("Failure: hooks execution failed:\n")
2422
    for node, script, out in err.args[0]:
2423
      if out:
2424
        obuf.write("  node: %s, script: %s, output: %s\n" %
2425
                   (node, script, out))
2426
      else:
2427
        obuf.write("  node: %s, script: %s (no output)\n" %
2428
                   (node, script))
2429
  elif isinstance(err, errors.HooksFailure):
2430
    obuf.write("Failure: hooks general failure: %s" % msg)
2431
  elif isinstance(err, errors.ResolverError):
2432
    this_host = netutils.Hostname.GetSysName()
2433
    if err.args[0] == this_host:
2434
      msg = "Failure: can't resolve my own hostname ('%s')"
2435
    else:
2436
      msg = "Failure: can't resolve hostname '%s'"
2437
    obuf.write(msg % err.args[0])
2438
  elif isinstance(err, errors.OpPrereqError):
2439
    if len(err.args) == 2:
2440
      obuf.write("Failure: prerequisites not met for this"
2441
                 " operation:\nerror type: %s, error details:\n%s" %
2442
                 (err.args[1], err.args[0]))
2443
    else:
2444
      obuf.write("Failure: prerequisites not met for this"
2445
                 " operation:\n%s" % msg)
2446
  elif isinstance(err, errors.OpExecError):
2447
    obuf.write("Failure: command execution error:\n%s" % msg)
2448
  elif isinstance(err, errors.TagError):
2449
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2450
  elif isinstance(err, errors.JobQueueDrainError):
2451
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2452
               " accept new requests\n")
2453
  elif isinstance(err, errors.JobQueueFull):
2454
    obuf.write("Failure: the job queue is full and doesn't accept new"
2455
               " job submissions until old jobs are archived\n")
2456
  elif isinstance(err, errors.TypeEnforcementError):
2457
    obuf.write("Parameter Error: %s" % msg)
2458
  elif isinstance(err, errors.ParameterError):
2459
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2460
  elif isinstance(err, rpcerr.NoMasterError):
2461
    if err.args[0] == pathutils.MASTER_SOCKET:
2462
      daemon = "the master daemon"
2463
    elif err.args[0] == pathutils.QUERY_SOCKET:
2464
      daemon = "the config daemon"
2465
    else:
2466
      daemon = "socket '%s'" % str(err.args[0])
2467
    obuf.write("Cannot communicate with %s.\nIs the process running"
2468
               " and listening for connections?" % daemon)
2469
  elif isinstance(err, rpcerr.TimeoutError):
2470
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2471
               " been submitted and will continue to run even if the call"
2472
               " timed out. Useful commands in this situation are \"gnt-job"
2473
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2474
    obuf.write(msg)
2475
  elif isinstance(err, rpcerr.PermissionError):
2476
    obuf.write("It seems you don't have permissions to connect to the"
2477
               " master daemon.\nPlease retry as a different user.")
2478
  elif isinstance(err, rpcerr.ProtocolError):
2479
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2480
               "%s" % msg)
2481
  elif isinstance(err, errors.JobLost):
2482
    obuf.write("Error checking job status: %s" % msg)
2483
  elif isinstance(err, errors.QueryFilterParseError):
2484
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2485
    obuf.write("\n".join(err.GetDetails()))
2486
  elif isinstance(err, errors.GenericError):
2487
    obuf.write("Unhandled Ganeti error: %s" % msg)
2488
  elif isinstance(err, JobSubmittedException):
2489
    obuf.write("JobID: %s\n" % err.args[0])
2490
    retcode = 0
2491
  else:
2492
    obuf.write("Unhandled exception: %s" % msg)
2493
  return retcode, obuf.getvalue().rstrip("\n")
2494

    
2495

    
2496
def GenericMain(commands, override=None, aliases=None,
2497
                env_override=frozenset()):
2498
  """Generic main function for all the gnt-* commands.
2499

2500
  @param commands: a dictionary with a special structure, see the design doc
2501
                   for command line handling.
2502
  @param override: if not None, we expect a dictionary with keys that will
2503
                   override command line options; this can be used to pass
2504
                   options from the scripts to generic functions
2505
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2506
  @param env_override: list of environment names which are allowed to submit
2507
                       default args for commands
2508

2509
  """
2510
  # save the program name and the entire command line for later logging
2511
  if sys.argv:
2512
    binary = os.path.basename(sys.argv[0])
2513
    if not binary:
2514
      binary = sys.argv[0]
2515

    
2516
    if len(sys.argv) >= 2:
2517
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2518
    else:
2519
      logname = binary
2520

    
2521
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2522
  else:
2523
    binary = "<unknown program>"
2524
    cmdline = "<unknown>"
2525

    
2526
  if aliases is None:
2527
    aliases = {}
2528

    
2529
  try:
2530
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2531
                                       env_override)
2532
  except _ShowVersion:
2533
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2534
             constants.RELEASE_VERSION)
2535
    return constants.EXIT_SUCCESS
2536
  except _ShowUsage, err:
2537
    for line in _FormatUsage(binary, commands):
2538
      ToStdout(line)
2539

    
2540
    if err.exit_error:
2541
      return constants.EXIT_FAILURE
2542
    else:
2543
      return constants.EXIT_SUCCESS
2544
  except errors.ParameterError, err:
2545
    result, err_msg = FormatError(err)
2546
    ToStderr(err_msg)
2547
    return 1
2548

    
2549
  if func is None: # parse error
2550
    return 1
2551

    
2552
  if override is not None:
2553
    for key, val in override.iteritems():
2554
      setattr(options, key, val)
2555

    
2556
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2557
                     stderr_logging=True)
2558

    
2559
  logging.info("Command line: %s", cmdline)
2560

    
2561
  try:
2562
    result = func(options, args)
2563
  except (errors.GenericError, rpcerr.ProtocolError,
2564
          JobSubmittedException), err:
2565
    result, err_msg = FormatError(err)
2566
    logging.exception("Error during command processing")
2567
    ToStderr(err_msg)
2568
  except KeyboardInterrupt:
2569
    result = constants.EXIT_FAILURE
2570
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2571
             " might have been submitted and"
2572
             " will continue to run in the background.")
2573
  except IOError, err:
2574
    if err.errno == errno.EPIPE:
2575
      # our terminal went away, we'll exit
2576
      sys.exit(constants.EXIT_FAILURE)
2577
    else:
2578
      raise
2579

    
2580
  return result
2581

    
2582

    
2583
def ParseNicOption(optvalue):
2584
  """Parses the value of the --net option(s).
2585

2586
  """
2587
  try:
2588
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2589
  except (TypeError, ValueError), err:
2590
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2591
                               errors.ECODE_INVAL)
2592

    
2593
  nics = [{}] * nic_max
2594
  for nidx, ndict in optvalue:
2595
    nidx = int(nidx)
2596

    
2597
    if not isinstance(ndict, dict):
2598
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2599
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2600

    
2601
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2602

    
2603
    nics[nidx] = ndict
2604

    
2605
  return nics
2606

    
2607

    
2608
def GenericInstanceCreate(mode, opts, args):
2609
  """Add an instance to the cluster via either creation or import.
2610

2611
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2612
  @param opts: the command line options selected by the user
2613
  @type args: list
2614
  @param args: should contain only one element, the new instance name
2615
  @rtype: int
2616
  @return: the desired exit code
2617

2618
  """
2619
  instance = args[0]
2620

    
2621
  (pnode, snode) = SplitNodeOption(opts.node)
2622

    
2623
  hypervisor = None
2624
  hvparams = {}
2625
  if opts.hypervisor:
2626
    hypervisor, hvparams = opts.hypervisor
2627

    
2628
  if opts.nics:
2629
    nics = ParseNicOption(opts.nics)
2630
  elif opts.no_nics:
2631
    # no nics
2632
    nics = []
2633
  elif mode == constants.INSTANCE_CREATE:
2634
    # default of one nic, all auto
2635
    nics = [{}]
2636
  else:
2637
    # mode == import
2638
    nics = []
2639

    
2640
  if opts.disk_template == constants.DT_DISKLESS:
2641
    if opts.disks or opts.sd_size is not None:
2642
      raise errors.OpPrereqError("Diskless instance but disk"
2643
                                 " information passed", errors.ECODE_INVAL)
2644
    disks = []
2645
  else:
2646
    if (not opts.disks and not opts.sd_size
2647
        and mode == constants.INSTANCE_CREATE):
2648
      raise errors.OpPrereqError("No disk information specified",
2649
                                 errors.ECODE_INVAL)
2650
    if opts.disks and opts.sd_size is not None:
2651
      raise errors.OpPrereqError("Please use either the '--disk' or"
2652
                                 " '-s' option", errors.ECODE_INVAL)
2653
    if opts.sd_size is not None:
2654
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2655

    
2656
    if opts.disks:
2657
      try:
2658
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2659
      except ValueError, err:
2660
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2661
                                   errors.ECODE_INVAL)
2662
      disks = [{}] * disk_max
2663
    else:
2664
      disks = []
2665
    for didx, ddict in opts.disks:
2666
      didx = int(didx)
2667
      if not isinstance(ddict, dict):
2668
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2669
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2670
      elif constants.IDISK_SIZE in ddict:
2671
        if constants.IDISK_ADOPT in ddict:
2672
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2673
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2674
        try:
2675
          ddict[constants.IDISK_SIZE] = \
2676
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2677
        except ValueError, err:
2678
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2679
                                     (didx, err), errors.ECODE_INVAL)
2680
      elif constants.IDISK_ADOPT in ddict:
2681
        if constants.IDISK_SPINDLES in ddict:
2682
          raise errors.OpPrereqError("spindles is not a valid option when"
2683
                                     " adopting a disk", errors.ECODE_INVAL)
2684
        if mode == constants.INSTANCE_IMPORT:
2685
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2686
                                     " import", errors.ECODE_INVAL)
2687
        ddict[constants.IDISK_SIZE] = 0
2688
      else:
2689
        raise errors.OpPrereqError("Missing size or adoption source for"
2690
                                   " disk %d" % didx, errors.ECODE_INVAL)
2691
      if constants.IDISK_SPINDLES in ddict:
2692
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])
2693

    
2694
      disks[didx] = ddict
2695

    
2696
  if opts.tags is not None:
2697
    tags = opts.tags.split(",")
2698
  else:
2699
    tags = []
2700

    
2701
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2702
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2703

    
2704
  if mode == constants.INSTANCE_CREATE:
2705
    start = opts.start
2706
    os_type = opts.os
2707
    force_variant = opts.force_variant
2708
    src_node = None
2709
    src_path = None
2710
    no_install = opts.no_install
2711
    identify_defaults = False
2712
    compress = constants.IEC_NONE
2713
  elif mode == constants.INSTANCE_IMPORT:
2714
    start = False
2715
    os_type = None
2716
    force_variant = False
2717
    src_node = opts.src_node
2718
    src_path = opts.src_dir
2719
    no_install = None
2720
    identify_defaults = opts.identify_defaults
2721
    compress = opts.compress
2722
  else:
2723
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2724

    
2725
  op = opcodes.OpInstanceCreate(instance_name=instance,
2726
                                disks=disks,
2727
                                disk_template=opts.disk_template,
2728
                                nics=nics,
2729
                                conflicts_check=opts.conflicts_check,
2730
                                pnode=pnode, snode=snode,
2731
                                ip_check=opts.ip_check,
2732
                                name_check=opts.name_check,
2733
                                wait_for_sync=opts.wait_for_sync,
2734
                                file_storage_dir=opts.file_storage_dir,
2735
                                file_driver=opts.file_driver,
2736
                                iallocator=opts.iallocator,
2737
                                hypervisor=hypervisor,
2738
                                hvparams=hvparams,
2739
                                beparams=opts.beparams,
2740
                                osparams=opts.osparams,
2741
                                mode=mode,
2742
                                start=start,
2743
                                os_type=os_type,
2744
                                force_variant=force_variant,
2745
                                src_node=src_node,
2746
                                src_path=src_path,
2747
                                compress=compress,
2748
                                tags=tags,
2749
                                no_install=no_install,
2750
                                identify_defaults=identify_defaults,
2751
                                ignore_ipolicy=opts.ignore_ipolicy)
2752

    
2753
  SubmitOrSend(op, opts)
2754
  return 0
2755

    
2756

    
2757
class _RunWhileClusterStoppedHelper:
2758
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2759

2760
  """
2761
  def __init__(self, feedback_fn, cluster_name, master_node,
2762
               online_nodes, ssh_ports):
2763
    """Initializes this class.
2764

2765
    @type feedback_fn: callable
2766
    @param feedback_fn: Feedback function
2767
    @type cluster_name: string
2768
    @param cluster_name: Cluster name
2769
    @type master_node: string
2770
    @param master_node Master node name
2771
    @type online_nodes: list
2772
    @param online_nodes: List of names of online nodes
2773
    @type ssh_ports: list
2774
    @param ssh_ports: List of SSH ports of online nodes
2775

2776
    """
2777
    self.feedback_fn = feedback_fn
2778
    self.cluster_name = cluster_name
2779
    self.master_node = master_node
2780
    self.online_nodes = online_nodes
2781
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))
2782

    
2783
    self.ssh = ssh.SshRunner(self.cluster_name)
2784

    
2785
    self.nonmaster_nodes = [name for name in online_nodes
2786
                            if name != master_node]
2787

    
2788
    assert self.master_node not in self.nonmaster_nodes
2789

    
2790
  def _RunCmd(self, node_name, cmd):
2791
    """Runs a command on the local or a remote machine.
2792

2793
    @type node_name: string
2794
    @param node_name: Machine name
2795
    @type cmd: list
2796
    @param cmd: Command
2797

2798
    """
2799
    if node_name is None or node_name == self.master_node:
2800
      # No need to use SSH
2801
      result = utils.RunCmd(cmd)
2802
    else:
2803
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2804
                            utils.ShellQuoteArgs(cmd),
2805
                            port=self.ssh_ports[node_name])
2806

    
2807
    if result.failed:
2808
      errmsg = ["Failed to run command %s" % result.cmd]
2809
      if node_name:
2810
        errmsg.append("on node %s" % node_name)
2811
      errmsg.append(": exitcode %s and error %s" %
2812
                    (result.exit_code, result.output))
2813
      raise errors.OpExecError(" ".join(errmsg))
2814

    
2815
  def Call(self, fn, *args):
2816
    """Call function while all daemons are stopped.
2817

2818
    @type fn: callable
2819
    @param fn: Function to be called
2820

2821
    """
2822
    # Pause watcher by acquiring an exclusive lock on watcher state file
2823
    self.feedback_fn("Blocking watcher")
2824
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2825
    try:
2826
      # TODO: Currently, this just blocks. There's no timeout.
2827
      # TODO: Should it be a shared lock?
2828
      watcher_block.Exclusive(blocking=True)
2829

    
2830
      # Stop master daemons, so that no new jobs can come in and all running
2831
      # ones are finished
2832
      self.feedback_fn("Stopping master daemons")
2833
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2834
      try:
2835
        # Stop daemons on all nodes
2836
        for node_name in self.online_nodes:
2837
          self.feedback_fn("Stopping daemons on %s" % node_name)
2838
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2839

    
2840
        # All daemons are shut down now
2841
        try:
2842
          return fn(self, *args)
2843
        except Exception, err:
2844
          _, errmsg = FormatError(err)
2845
          logging.exception("Caught exception")
2846
          self.feedback_fn(errmsg)
2847
          raise
2848
      finally:
2849
        # Start cluster again, master node last
2850
        for node_name in self.nonmaster_nodes + [self.master_node]:
2851
          self.feedback_fn("Starting daemons on %s" % node_name)
2852
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2853
    finally:
2854
      # Resume watcher
2855
      watcher_block.Close()
2856

    
2857

    
2858
def RunWhileClusterStopped(feedback_fn, fn, *args):
2859
  """Calls a function while all cluster daemons are stopped.
2860

2861
  @type feedback_fn: callable
2862
  @param feedback_fn: Feedback function
2863
  @type fn: callable
2864
  @param fn: Function to be called when daemons are stopped
2865

2866
  """
2867
  feedback_fn("Gathering cluster information")
2868

    
2869
  # This ensures we're running on the master daemon
2870
  cl = GetClient()
2871
  # Query client
2872
  qcl = GetClient(query=True)
2873

    
2874
  (cluster_name, master_node) = \
2875
    cl.QueryConfigValues(["cluster_name", "master_node"])
2876

    
2877
  online_nodes = GetOnlineNodes([], cl=qcl)
2878
  ssh_ports = GetNodesSshPorts(online_nodes, qcl)
2879

    
2880
  # Don't keep a reference to the client. The master daemon will go away.
2881
  del cl
2882
  del qcl
2883

    
2884
  assert master_node in online_nodes
2885

    
2886
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2887
                                       online_nodes, ssh_ports).Call(fn, *args)
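# Usage sketch (illustrative; "_RenameCluster" is a hypothetical callback):
# the callback receives the helper object as its first argument, so it can
# run further commands on individual nodes through the helper while all
# daemons are down.
#
#   def _RenameCluster(helper, new_name):
#     helper.feedback_fn("Renaming cluster to %s" % new_name)
#     # ... modify the configuration here ...
#
#   RunWhileClusterStopped(ToStdout, _RenameCluster, "new-name.example.com")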
2888

    
2889

    
2890
def GenerateTable(headers, fields, separator, data,
2891
                  numfields=None, unitfields=None,
2892
                  units=None):
2893
  """Prints a table with headers and different fields.
2894

2895
  @type headers: dict
2896
  @param headers: dictionary mapping field names to headers for
2897
      the table
2898
  @type fields: list
2899
  @param fields: the field names corresponding to each row in
2900
      the data field
2901
  @param separator: the separator to be used; if this is None,
2902
      the default 'smart' algorithm is used which computes optimal
2903
      field width, otherwise just the separator is used between
2904
      each field
2905
  @type data: list
2906
  @param data: a list of lists, each sublist being one row to be output
2907
  @type numfields: list
2908
  @param numfields: a list with the fields that hold numeric
2909
      values and thus should be right-aligned
2910
  @type unitfields: list
2911
  @param unitfields: a list with the fields that hold numeric
2912
      values that should be formatted with the units field
2913
  @type units: string or None
2914
  @param units: the units we should use for formatting, or None for
2915
      automatic choice (human-readable for non-separator usage, otherwise
2916
      megabytes); this is a one-letter string
2917

2918
  """
2919
  if units is None:
2920
    if separator:
2921
      units = "m"
2922
    else:
2923
      units = "h"
2924

    
2925
  if numfields is None:
2926
    numfields = []
2927
  if unitfields is None:
2928
    unitfields = []
2929

    
2930
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2931
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2932

    
2933
  format_fields = []
2934
  for field in fields:
2935
    if headers and field not in headers:
2936
      # TODO: handle better unknown fields (either revert to old
2937
      # style of raising exception, or deal more intelligently with
2938
      # variable fields)
2939
      headers[field] = field
2940
    if separator is not None:
2941
      format_fields.append("%s")
2942
    elif numfields.Matches(field):
2943
      format_fields.append("%*s")
2944
    else:
2945
      format_fields.append("%-*s")
2946

    
2947
  if separator is None:
2948
    mlens = [0 for name in fields]
2949
    format_str = " ".join(format_fields)
2950
  else:
2951
    format_str = separator.replace("%", "%%").join(format_fields)
2952

    
2953
  for row in data:
2954
    if row is None:
2955
      continue
2956
    for idx, val in enumerate(row):
2957
      if unitfields.Matches(fields[idx]):
2958
        try:
2959
          val = int(val)
2960
        except (TypeError, ValueError):
2961
          pass
2962
        else:
2963
          val = row[idx] = utils.FormatUnit(val, units)
2964
      val = row[idx] = str(val)
2965
      if separator is None:
2966
        mlens[idx] = max(mlens[idx], len(val))
2967

    
2968
  result = []
2969
  if headers:
2970
    args = []
2971
    for idx, name in enumerate(fields):
2972
      hdr = headers[name]
2973
      if separator is None:
2974
        mlens[idx] = max(mlens[idx], len(hdr))
2975
        args.append(mlens[idx])
2976
      args.append(hdr)
2977
    result.append(format_str % tuple(args))
2978

    
2979
  if separator is None:
2980
    assert len(mlens) == len(fields)
2981

    
2982
    if fields and not numfields.Matches(fields[-1]):
2983
      mlens[-1] = 0
2984

    
2985
  for line in data:
2986
    args = []
2987
    if line is None:
2988
      line = ["-" for _ in fields]
2989
    for idx in range(len(fields)):
2990
      if separator is None:
2991
        args.append(mlens[idx])
2992
      args.append(line[idx])
2993
    result.append(format_str % tuple(args))
2994

    
2995
  return result
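# Usage sketch (illustrative field names): with separator=None the column
# widths are computed from the data, numeric fields are right-aligned and
# unit fields are rendered through utils.FormatUnit.
#
#   headers = {"name": "Node", "dtotal": "DTotal"}
#   rows = [["node1.example.com", 102400], ["node2.example.com", 51200]]
#   for line in GenerateTable(headers, ["name", "dtotal"], None, rows,
#                             numfields=["dtotal"], unitfields=["dtotal"]):
#     ToStdout(line)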
2996

    
2997

    
2998
def _FormatBool(value):
2999
  """Formats a boolean value as a string.
3000

3001
  """
3002
  if value:
3003
    return "Y"
3004
  return "N"
3005

    
3006

    
3007
#: Default formatting for query results; (callback, align right)
3008
_DEFAULT_FORMAT_QUERY = {
3009
  constants.QFT_TEXT: (str, False),
3010
  constants.QFT_BOOL: (_FormatBool, False),
3011
  constants.QFT_NUMBER: (str, True),
3012
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3013
  constants.QFT_OTHER: (str, False),
3014
  constants.QFT_UNKNOWN: (str, False),
3015
  }
3016

    
3017

    
3018
def _GetColumnFormatter(fdef, override, unit):
3019
  """Returns formatting function for a field.
3020

3021
  @type fdef: L{objects.QueryFieldDefinition}
3022
  @type override: dict
3023
  @param override: Dictionary for overriding field formatting functions,
3024
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3025
  @type unit: string
3026
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3027
  @rtype: tuple; (callable, bool)
3028
  @return: Returns the function to format a value (takes one parameter) and a
3029
    boolean for aligning the value on the right-hand side
3030

3031
  """
3032
  fmt = override.get(fdef.name, None)
3033
  if fmt is not None:
3034
    return fmt
3035

    
3036
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3037

    
3038
  if fdef.kind == constants.QFT_UNIT:
3039
    # Can't keep this information in the static dictionary
3040
    return (lambda value: utils.FormatUnit(value, unit), True)
3041

    
3042
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3043
  if fmt is not None:
3044
    return fmt
3045

    
3046
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3047

    
3048

    
3049
class _QueryColumnFormatter:
3050
  """Callable class for formatting fields of a query.
3051

3052
  """
3053
  def __init__(self, fn, status_fn, verbose):
3054
    """Initializes this class.
3055

3056
    @type fn: callable
3057
    @param fn: Formatting function
3058
    @type status_fn: callable
3059
    @param status_fn: Function to report fields' status
3060
    @type verbose: boolean
3061
    @param verbose: whether to use verbose field descriptions or not
3062

3063
    """
3064
    self._fn = fn
3065
    self._status_fn = status_fn
3066
    self._verbose = verbose
3067

    
3068
  def __call__(self, data):
3069
    """Returns a field's string representation.
3070

3071
    """
3072
    (status, value) = data
3073

    
3074
    # Report status
3075
    self._status_fn(status)
3076

    
3077
    if status == constants.RS_NORMAL:
3078
      return self._fn(value)
3079

    
3080
    assert value is None, \
3081
           "Found value %r for abnormal status %s" % (value, status)
3082

    
3083
    return FormatResultError(status, self._verbose)
3084

    
3085

    
3086
def FormatResultError(status, verbose):
3087
  """Formats result status other than L{constants.RS_NORMAL}.
3088

3089
  @param status: The result status
3090
  @type verbose: boolean
3091
  @param verbose: Whether to return the verbose text
3092
  @return: Text of result status
3093

3094
  """
3095
  assert status != constants.RS_NORMAL, \
3096
         "FormatResultError called with status equal to constants.RS_NORMAL"
3097
  try:
3098
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3099
  except KeyError:
3100
    raise NotImplementedError("Unknown status %s" % status)
3101
  else:
3102
    if verbose:
3103
      return verbose_text
3104
    return normal_text
3105

    
3106

    
3107
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3108
                      header=False, verbose=False):
3109
  """Formats data in L{objects.QueryResponse}.
3110

3111
  @type result: L{objects.QueryResponse}
3112
  @param result: result of query operation
3113
  @type unit: string
3114
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3115
    see L{utils.text.FormatUnit}
3116
  @type format_override: dict
3117
  @param format_override: Dictionary for overriding field formatting functions,
3118
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3119
  @type separator: string or None
3120
  @param separator: String used to separate fields
3121
  @type header: bool
3122
  @param header: Whether to output header row
3123
  @type verbose: boolean
3124
  @param verbose: whether to use verbose field descriptions or not
3125

3126
  """
3127
  if unit is None:
3128
    if separator:
3129
      unit = "m"
3130
    else:
3131
      unit = "h"
3132

    
3133
  if format_override is None:
3134
    format_override = {}
3135

    
3136
  stats = dict.fromkeys(constants.RS_ALL, 0)
3137

    
3138
  def _RecordStatus(status):
3139
    if status in stats:
3140
      stats[status] += 1
3141

    
3142
  columns = []
3143
  for fdef in result.fields:
3144
    assert fdef.title and fdef.name
3145
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3146
    columns.append(TableColumn(fdef.title,
3147
                               _QueryColumnFormatter(fn, _RecordStatus,
3148
                                                     verbose),
3149
                               align_right))
3150

    
3151
  table = FormatTable(result.data, columns, header, separator)
3152

    
3153
  # Collect statistics
3154
  assert len(stats) == len(constants.RS_ALL)
3155
  assert compat.all(count >= 0 for count in stats.values())
3156

    
3157
  # Determine overall status. If there was no data, unknown fields must be
3158
  # detected via the field definitions.
3159
  if (stats[constants.RS_UNKNOWN] or
3160
      (not result.data and _GetUnknownFields(result.fields))):
3161
    status = QR_UNKNOWN
3162
  elif compat.any(count > 0 for key, count in stats.items()
3163
                  if key != constants.RS_NORMAL):
3164
    status = QR_INCOMPLETE
3165
  else:
3166
    status = QR_NORMAL
3167

    
3168
  return (status, table)
3169

    
3170

    
3171
def _GetUnknownFields(fdefs):
3172
  """Returns list of unknown fields included in C{fdefs}.
3173

3174
  @type fdefs: list of L{objects.QueryFieldDefinition}
3175

3176
  """
3177
  return [fdef for fdef in fdefs
3178
          if fdef.kind == constants.QFT_UNKNOWN]
3179

    
3180

    
3181
def _WarnUnknownFields(fdefs):
3182
  """Prints a warning to stderr if a query included unknown fields.
3183

3184
  @type fdefs: list of L{objects.QueryFieldDefinition}
3185

3186
  """
3187
  unknown = _GetUnknownFields(fdefs)
3188
  if unknown:
3189
    ToStderr("Warning: Queried for unknown fields %s",
3190
             utils.CommaJoin(fdef.name for fdef in unknown))
3191
    return True
3192

    
3193
  return False
3194

    
3195

    
3196
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3197
                format_override=None, verbose=False, force_filter=False,
3198
                namefield=None, qfilter=None, isnumeric=False):
3199
  """Generic implementation for listing all items of a resource.
3200

3201
  @param resource: One of L{constants.QR_VIA_LUXI}
3202
  @type fields: list of strings
3203
  @param fields: List of fields to query for
3204
  @type names: list of strings
3205
  @param names: Names of items to query for
3206
  @type unit: string or None
3207
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3208
    None for automatic choice (human-readable for non-separator usage,
3209
    otherwise megabytes); this is a one-letter string
3210
  @type separator: string or None
3211
  @param separator: String used to separate fields
3212
  @type header: bool
3213
  @param header: Whether to show header row
3214
  @type force_filter: bool
3215
  @param force_filter: Whether to always treat names as filter
3216
  @type format_override: dict
3217
  @param format_override: Dictionary for overriding field formatting functions,
3218
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3219
  @type verbose: boolean
3220
  @param verbose: whether to use verbose field descriptions or not
3221
  @type namefield: string
3222
  @param namefield: Name of field to use for simple filters (see
3223
    L{qlang.MakeFilter} for details)
3224
  @type qfilter: list or None
3225
  @param qfilter: Query filter (in addition to names)
3226
  @type isnumeric: bool
3227
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3228
    any simple filters built by namefield should use integer values to
3229
    reflect that
3230

3231
  """
3232
  if not names:
3233
    names = None
3234

    
3235
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3236
                                isnumeric=isnumeric)
3237

    
3238
  if qfilter is None:
3239
    qfilter = namefilter
3240
  elif namefilter is not None:
3241
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3242

    
3243
  if cl is None:
3244
    cl = GetClient()
3245

    
3246
  response = cl.Query(resource, fields, qfilter)
3247

    
3248
  found_unknown = _WarnUnknownFields(response.fields)
3249

    
3250
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3251
                                     header=header,
3252
                                     format_override=format_override,
3253
                                     verbose=verbose)
3254

    
3255
  for line in data:
3256
    ToStdout(line)
3257

    
3258
  assert ((found_unknown and status == QR_UNKNOWN) or
3259
          (not found_unknown and status != QR_UNKNOWN))
3260

    
3261
  if status == QR_UNKNOWN:
3262
    return constants.EXIT_UNKNOWN_FIELD
3263

    
3264
  # TODO: Should the list command fail if not all data could be collected?
3265
  return constants.EXIT_SUCCESS
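# Usage sketch (illustrative; the "opts" attributes follow the usual list
# options but the exact names depend on the calling script):
#
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
#                      opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose, namefield="name")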
3266

    
3267

    
3268
def _FieldDescValues(fdef):
3269
  """Helper function for L{GenericListFields} to get query field description.
3270

3271
  @type fdef: L{objects.QueryFieldDefinition}
3272
  @rtype: list
3273

3274
  """
3275
  return [
3276
    fdef.name,
3277
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3278
    fdef.title,
3279
    fdef.doc,
3280
    ]
3281

    
3282

    
3283
def GenericListFields(resource, fields, separator, header, cl=None):
3284
  """Generic implementation for listing fields for a resource.
3285

3286
  @param resource: One of L{constants.QR_VIA_LUXI}
3287
  @type fields: list of strings
3288
  @param fields: List of fields to query for
3289
  @type separator: string or None
3290
  @param separator: String used to separate fields
3291
  @type header: bool
3292
  @param header: Whether to show header row
3293

3294
  """
3295
  if cl is None:
3296
    cl = GetClient()
3297

    
3298
  if not fields:
3299
    fields = None
3300

    
3301
  response = cl.QueryFields(resource, fields)
3302

    
3303
  found_unknown = _WarnUnknownFields(response.fields)
3304

    
3305
  columns = [
3306
    TableColumn("Name", str, False),
3307
    TableColumn("Type", str, False),
3308
    TableColumn("Title", str, False),
3309
    TableColumn("Description", str, False),
3310
    ]
3311

    
3312
  rows = map(_FieldDescValues, response.fields)
3313

    
3314
  for line in FormatTable(rows, columns, header, separator):
3315
    ToStdout(line)
3316

    
3317
  if found_unknown:
3318
    return constants.EXIT_UNKNOWN_FIELD
3319

    
3320
  return constants.EXIT_SUCCESS
3321

    
3322

    
3323
class TableColumn:
3324
  """Describes a column for L{FormatTable}.
3325

3326
  """
3327
  def __init__(self, title, fn, align_right):
3328
    """Initializes this class.
3329

3330
    @type title: string
3331
    @param title: Column title
3332
    @type fn: callable
3333
    @param fn: Formatting function
3334
    @type align_right: bool
3335
    @param align_right: Whether to align values on the right-hand side
3336

3337
    """
3338
    self.title = title
3339
    self.format = fn
3340
    self.align_right = align_right
3341

    
3342

    
3343
def _GetColFormatString(width, align_right):
3344
  """Returns the format string for a field.
3345

3346
  """
3347
  if align_right:
3348
    sign = ""
3349
  else:
3350
    sign = "-"
3351

    
3352
  return "%%%s%ss" % (sign, width)
3353

    
3354

    
3355
def FormatTable(rows, columns, header, separator):
3356
  """Formats data as a table.
3357

3358
  @type rows: list of lists
3359
  @param rows: Row data, one list per row
3360
  @type columns: list of L{TableColumn}
3361
  @param columns: Column descriptions
3362
  @type header: bool
3363
  @param header: Whether to show header row
3364
  @type separator: string or None
3365
  @param separator: String used to separate columns
3366

3367
  """
3368
  if header:
3369
    data = [[col.title for col in columns]]
3370
    colwidth = [len(col.title) for col in columns]
3371
  else:
3372
    data = []
3373
    colwidth = [0 for _ in columns]
3374

    
3375
  # Format row data
3376
  for row in rows:
3377
    assert len(row) == len(columns)
3378

    
3379
    formatted = [col.format(value) for value, col in zip(row, columns)]
3380

    
3381
    if separator is None:
3382
      # Update column widths
3383
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3384
        # Modifying a list's items while iterating is fine
3385
        colwidth[idx] = max(oldwidth, len(value))
3386

    
3387
    data.append(formatted)
3388

    
3389
  if separator is not None:
3390
    # Return early if a separator is used
3391
    return [separator.join(row) for row in data]
3392

    
3393
  if columns and not columns[-1].align_right:
3394
    # Avoid unnecessary spaces at end of line
3395
    colwidth[-1] = 0
3396

    
3397
  # Build format string
3398
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3399
                  for col, width in zip(columns, colwidth)])
3400

    
3401
  return [fmt % tuple(row) for row in data]
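# Usage sketch (illustrative): each TableColumn supplies the cell formatter
# and alignment, and FormatTable sizes the columns to the longest value.
#
#   cols = [TableColumn("Name", str, False),
#           TableColumn("Count", str, True)]
#   lines = FormatTable([["foo", 1], ["barbaz", 42]], cols, True, None)
#
# "lines" then holds a header row plus one line per data row, with "Name"
# left-aligned and "Count" right-aligned.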
3402

    
3403

    
3404
def FormatTimestamp(ts):
3405
  """Formats a given timestamp.
3406

3407
  @type ts: timestamp
3408
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3409

3410
  @rtype: string
3411
  @return: a string with the formatted timestamp
3412

3413
  """
3414
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3415
    return "?"
3416

    
3417
  (sec, usecs) = ts
3418
  return utils.FormatTime(sec, usecs=usecs)
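# Usage sketch: timestamps are (seconds, microseconds) pairs as stored in the
# job queue; anything else degrades to "?".
#
#   FormatTimestamp((1234567890, 123456))
#   # -> e.g. "2009-02-13 23:31:30.123456" (exact text depends on timezone)
#   FormatTimestamp("bogus")
#   # -> "?"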
3419

    
3420

    
3421
def ParseTimespec(value):
3422
  """Parse a time specification.
3423

3424
  The following suffixes will be recognized:
3425

3426
    - s: seconds
3427
    - m: minutes
3428
    - h: hours
3429
    - d: days
3430
    - w: weeks
3431

3432
  Without any suffix, the value will be taken to be in seconds.
3433

3434
  """
3435
  value = str(value)
3436
  if not value:
3437
    raise errors.OpPrereqError("Empty time specification passed",
3438
                               errors.ECODE_INVAL)
3439
  suffix_map = {
3440
    "s": 1,
3441
    "m": 60,
3442
    "h": 3600,
3443
    "d": 86400,
3444
    "w": 604800,
3445
    }
3446
  if value[-1] not in suffix_map:
3447
    try:
3448
      value = int(value)
3449
    except (TypeError, ValueError):
3450
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3451
                                 errors.ECODE_INVAL)
3452
  else:
3453
    multiplier = suffix_map[value[-1]]
3454
    value = value[:-1]
3455
    if not value: # no data left after stripping the suffix
3456
      raise errors.OpPrereqError("Invalid time specification (only"
3457
                                 " suffix passed)", errors.ECODE_INVAL)
3458
    try:
3459
      value = int(value) * multiplier
3460
    except (TypeError, ValueError):
3461
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3462
                                 errors.ECODE_INVAL)
3463
  return value
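# Usage sketch: plain integers are taken as seconds, suffixed values are
# multiplied accordingly, and invalid input raises errors.OpPrereqError.
#
#   ParseTimespec("30")   # -> 30
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("2x")   # raises errors.OpPrereqError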
3464

    
3465

    
3466
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3467
                   filter_master=False, nodegroup=None):
3468
  """Returns the names of online nodes.
3469

3470
  This function will also log a warning on stderr with the names of
3471
  the offline nodes that are skipped.
3472

3473
  @param nodes: if not empty, use only this subset of nodes (minus the
3474
      offline ones)
3475
  @param cl: if not None, luxi client to use
3476
  @type nowarn: boolean
3477
  @param nowarn: by default, this function will output a note with the
3478
      offline nodes that are skipped; if this parameter is True the
3479
      note is not displayed
3480
  @type secondary_ips: boolean
3481
  @param secondary_ips: if True, return the secondary IPs instead of the
3482
      names, useful for doing network traffic over the replication interface
3483
      (if any)
3484
  @type filter_master: boolean
3485
  @param filter_master: if True, do not return the master node in the list
3486
      (useful in coordination with secondary_ips where we cannot check our
3487
      node name against the list)
3488
  @type nodegroup: string
3489
  @param nodegroup: If set, only return nodes in this node group
3490

3491
  """
3492
  if cl is None:
3493
    cl = GetClient(query=True)
3494

    
3495
  qfilter = []
3496

    
3497
  if nodes:
3498
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3499

    
3500
  if nodegroup is not None:
3501
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3502
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3503

    
3504
  if filter_master:
3505
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3506

    
3507
  if qfilter:
3508
    if len(qfilter) > 1:
3509
      final_filter = [qlang.OP_AND] + qfilter
3510
    else:
3511
      assert len(qfilter) == 1
3512
      final_filter = qfilter[0]
3513
  else:
3514
    final_filter = None
3515

    
3516
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3517

    
3518
  def _IsOffline(row):
3519
    (_, (_, offline), _) = row
3520
    return offline
3521

    
3522
  def _GetName(row):
3523
    ((_, name), _, _) = row
3524
    return name
3525

    
3526
  def _GetSip(row):
3527
    (_, _, (_, sip)) = row
3528
    return sip
3529

    
3530
  (offline, online) = compat.partition(result.data, _IsOffline)
3531

    
3532
  if offline and not nowarn:
3533
    ToStderr("Note: skipping offline node(s): %s" %
3534
             utils.CommaJoin(map(_GetName, offline)))
3535

    
3536
  if secondary_ips:
3537
    fn = _GetSip
3538
  else:
3539
    fn = _GetName
3540

    
3541
  return map(fn, online)
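# Usage sketch (illustrative; "args" stands for the node names given on the
# command line, if any):
#
#   nodes = GetOnlineNodes(args, cl=GetClient(query=True), nowarn=True)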
3542

    
3543

    
3544
def GetNodesSshPorts(nodes, cl):
3545
  """Retrieves SSH ports of given nodes.
3546

3547
  @param nodes: the names of nodes
3548
  @type nodes: a list of strings
3549
  @param cl: a client to use for the query
3550
  @type cl: L{Client}
3551
  @return: the list of SSH ports corresponding to the nodes
3552
  @rtype: a list of integers
3553
  """
3554
  return map(lambda t: t[0],
3555
             cl.QueryNodes(names=nodes,
3556
                           fields=["ndp/ssh_port"],
3557
                           use_locking=False))
3558

    
3559

    
3560
def _ToStream(stream, txt, *args):
3561
  """Write a message to a stream, bypassing the logging system
3562

3563
  @type stream: file object
3564
  @param stream: the file to which we should write
3565
  @type txt: str
3566
  @param txt: the message
3567

3568
  """
3569
  try:
3570
    if args:
3571
      args = tuple(args)
3572
      stream.write(txt % args)
3573
    else:
3574
      stream.write(txt)
3575
    stream.write("\n")
3576
    stream.flush()
3577
  except IOError, err:
3578
    if err.errno == errno.EPIPE:
3579
      # our terminal went away, we'll exit
3580
      sys.exit(constants.EXIT_FAILURE)
3581
    else:
3582
      raise
3583

    
3584

    
3585
def ToStdout(txt, *args):
3586
  """Write a message to stdout only, bypassing the logging system
3587

3588
  This is just a wrapper over _ToStream.
3589

3590
  @type txt: str
3591
  @param txt: the message
3592

3593
  """
3594
  _ToStream(sys.stdout, txt, *args)
3595

    
3596

    
3597
def ToStderr(txt, *args):
3598
  """Write a message to stderr only, bypassing the logging system
3599

3600
  This is just a wrapper over _ToStream.
3601

3602
  @type txt: str
3603
  @param txt: the message
3604

3605
  """
3606
  _ToStream(sys.stderr, txt, *args)
3607

    
3608

    
3609
class JobExecutor(object):
3610
  """Class which manages the submission and execution of multiple jobs.
3611

3612
  Note that instances of this class should not be reused between
3613
  GetResults() calls.
3614

3615
  """
3616
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3617
    self.queue = []
3618
    if cl is None:
3619
      cl = GetClient()
3620
    self.cl = cl
3621
    self.verbose = verbose
3622
    self.jobs = []
3623
    self.opts = opts
3624
    self.feedback_fn = feedback_fn
3625
    self._counter = itertools.count()
3626

    
3627
  @staticmethod
3628
  def _IfName(name, fmt):
3629
    """Helper function for formatting name.
3630

3631
    """
3632
    if name:
3633
      return fmt % name
3634

    
3635
    return ""
3636

    
3637
  def QueueJob(self, name, *ops):
3638
    """Record a job for later submit.
3639

3640
    @type name: string
3641
    @param name: a description of the job, will be used in WaitJobSet
3642

3643
    """
3644
    SetGenericOpcodeOpts(ops, self.opts)
3645
    self.queue.append((self._counter.next(), name, ops))
3646

    
3647
  def AddJobId(self, name, status, job_id):
3648
    """Adds a job ID to the internal queue.
3649

3650
    """
3651
    self.jobs.append((self._counter.next(), status, job_id, name))
3652

    
3653
  def SubmitPending(self, each=False):
3654
    """Submit all pending jobs.
3655

3656
    """
3657
    if each:
3658
      results = []
3659
      for (_, _, ops) in self.queue:
3660
        # SubmitJob will remove the success status, but raise an exception if
3661
        # the submission fails, so we'll notice that anyway.
3662
        results.append([True, self.cl.SubmitJob(ops)[0]])
3663
    else:
3664
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3665
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3666
      self.jobs.append((idx, status, data, name))
3667

    
3668
  def _ChooseJob(self):
3669
    """Choose a non-waiting/queued job to poll next.
3670

3671
    """
3672
    assert self.jobs, "_ChooseJob called with empty job list"
3673

    
3674
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3675
                               ["status"])
3676
    assert result
3677

    
3678
    for job_data, status in zip(self.jobs, result):
3679
      if (isinstance(status, list) and status and
3680
          status[0] in (constants.JOB_STATUS_QUEUED,
3681
                        constants.JOB_STATUS_WAITING,
3682
                        constants.JOB_STATUS_CANCELING)):
3683
        # job is still present and waiting
3684
        continue
3685
      # good candidate found (either running job or lost job)
3686
      self.jobs.remove(job_data)
3687
      return job_data
3688

    
3689
    # no job found
3690
    return self.jobs.pop(0)
3691

    
3692
  def GetResults(self):
3693
    """Wait for and return the results of all jobs.
3694

3695
    @rtype: list
3696
    @return: list of tuples (success, job results), in the same order
3697
        as the submitted jobs; if a job has failed, instead of the result
3698
        there will be the error message
3699

3700
    """
3701
    if not self.jobs:
3702
      self.SubmitPending()
3703
    results = []
3704
    if self.verbose:
3705
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3706
      if ok_jobs:
3707
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3708

    
3709
    # first, remove any non-submitted jobs
3710
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3711
    for idx, _, jid, name in failures:
3712
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3713
      results.append((idx, False, jid))
3714

    
3715
    while self.jobs:
3716
      (idx, _, jid, name) = self._ChooseJob()
3717
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3718
      try:
3719
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3720
        success = True
3721
      except errors.JobLost, err:
3722
        _, job_result = FormatError(err)
3723
        ToStderr("Job %s%s has been archived, cannot check its result",
3724
                 jid, self._IfName(name, " for %s"))
3725
        success = False
3726
      except (errors.GenericError, rpcerr.ProtocolError), err:
3727
        _, job_result = FormatError(err)
3728
        success = False
3729
        # the error message will always be shown, verbose or not
3730
        ToStderr("Job %s%s has failed: %s",
3731
                 jid, self._IfName(name, " for %s"), job_result)
3732

    
3733
      results.append((idx, success, job_result))
3734

    
3735
    # sort based on the index, then drop it
3736
    results.sort()
3737
    results = [i[1:] for i in results]
3738

    
3739
    return results
3740

    
3741
  def WaitOrShow(self, wait):
3742
    """Wait for job results or only print the job IDs.
3743

3744
    @type wait: boolean
3745
    @param wait: whether to wait or not
3746

3747
    """
3748
    if wait:
3749
      return self.GetResults()
3750
    else:
3751
      if not self.jobs:
3752
        self.SubmitPending()
3753
      for _, status, result, name in self.jobs:
3754
        if status:
3755
          ToStdout("%s: %s", result, name)
3756
        else:
3757
          ToStderr("Failure for %s: %s", name, result)
3758
      return [row[1:3] for row in self.jobs]
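# Usage sketch (illustrative; "op1"/"op2" stand for real opcode objects and
# "opts" for the parsed command line options):
#
#   jex = JobExecutor(opts=opts)
#   jex.QueueJob("instance1.example.com", op1)
#   jex.QueueJob("instance2.example.com", op2)
#   for (success, result) in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)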
3759

    
3760

    
3761
def FormatParamsDictInfo(param_dict, actual):
3762
  """Formats a parameter dictionary.
3763

3764
  @type param_dict: dict
3765
  @param param_dict: the explicitly set (non-default) parameters
3766
  @type actual: dict
3767
  @param actual: the current parameter set (including defaults)
3768
  @rtype: dict
3769
  @return: dictionary where the value of each parameter is either a fully
3770
      formatted string or a dictionary containing formatted strings
3771

3772
  """
3773
  ret = {}
3774
  for (key, data) in actual.items():
3775
    if isinstance(data, dict) and data:
3776
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3777
    else:
3778
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3779
  return ret
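# Usage sketch (illustrative parameter names): values that are not present in
# the own parameters show up annotated with their effective default.
#
#   FormatParamsDictInfo({"kernel_path": "/vmlinuz"},
#                        {"kernel_path": "/vmlinuz",
#                         "root_path": "/dev/vda1"})
#   # -> {"kernel_path": "/vmlinuz", "root_path": "default (/dev/vda1)"}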
3780

    
3781

    
3782
def _FormatListInfoDefault(data, def_data):
3783
  if data is not None:
3784
    ret = utils.CommaJoin(data)
3785
  else:
3786
    ret = "default (%s)" % utils.CommaJoin(def_data)
3787
  return ret
3788

    
3789

    
3790
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3791
  """Formats an instance policy.
3792

3793
  @type custom_ipolicy: dict
3794
  @param custom_ipolicy: own policy
3795
  @type eff_ipolicy: dict
3796
  @param eff_ipolicy: effective policy (including defaults); ignored for
3797
      cluster
3798
  @type iscluster: bool
3799
  @param iscluster: the policy is at cluster level
3800
  @rtype: list of pairs
3801
  @return: formatted data, suitable for L{PrintGenericInfo}
3802

3803
  """
3804
  if iscluster:
3805
    eff_ipolicy = custom_ipolicy
3806

    
3807
  minmax_out = []
3808
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3809
  if custom_minmax:
3810
    for (k, minmax) in enumerate(custom_minmax):
3811
      minmax_out.append([
3812
        ("%s/%s" % (key, k),
3813
         FormatParamsDictInfo(minmax[key], minmax[key]))
3814
        for key in constants.ISPECS_MINMAX_KEYS
3815
        ])
3816
  else:
3817
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3818
      minmax_out.append([
3819
        ("%s/%s" % (key, k),
3820
         FormatParamsDictInfo({}, minmax[key]))
3821
        for key in constants.ISPECS_MINMAX_KEYS
3822
        ])
3823
  ret = [("bounds specs", minmax_out)]
3824

    
3825
  if iscluster:
3826
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3827
    ret.append(
3828
      (constants.ISPECS_STD,
3829
       FormatParamsDictInfo(stdspecs, stdspecs))
3830
      )
3831

    
3832
  ret.append(
3833
    ("allowed disk templates",
3834
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3835
                            eff_ipolicy[constants.IPOLICY_DTS]))
3836
    )
3837
  ret.extend([
3838
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3839
    for key in constants.IPOLICY_PARAMETERS
3840
    ])
3841
  return ret
3842

    
3843

    
3844
def _PrintSpecsParameters(buf, specs):
3845
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3846
  buf.write(",".join(values))
3847

    
3848

    
3849
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3850
  """Print the command option used to generate the given instance policy.
3851

3852
  Currently only the parts dealing with specs are supported.
3853

3854
  @type buf: StringIO
3855
  @param buf: stream to write into
3856
  @type ipolicy: dict
3857
  @param ipolicy: instance policy
3858
  @type isgroup: bool
3859
  @param isgroup: whether the policy is at group level
3860

3861
  """
3862
  if not isgroup:
3863
    stdspecs = ipolicy.get("std")
3864
    if stdspecs:
3865
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3866
      _PrintSpecsParameters(buf, stdspecs)
3867
  minmaxes = ipolicy.get("minmax", [])
3868
  first = True
3869
  for minmax in minmaxes:
3870
    minspecs = minmax.get("min")
3871
    maxspecs = minmax.get("max")
3872
    if minspecs and maxspecs:
3873
      if first:
3874
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3875
        first = False
3876
      else:
3877
        buf.write("//")
3878
      buf.write("min:")
3879
      _PrintSpecsParameters(buf, minspecs)
3880
      buf.write("/max:")
3881
      _PrintSpecsParameters(buf, maxspecs)
3882

    
3883

    
3884
def ConfirmOperation(names, list_type, text, extra=""):
3885
  """Ask the user to confirm an operation on a list of list_type.
3886

3887
  This function is used to request confirmation for doing an operation
3888
  on a given list of list_type.
3889

3890
  @type names: list
3891
  @param names: the list of names that we display when
3892
      we ask for confirmation
3893
  @type list_type: str
3894
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3895
  @type text: str
3896
  @param text: the operation that the user should confirm
3897
  @rtype: boolean
3898
  @return: True or False depending on user's confirmation.
3899

3900
  """
3901
  count = len(names)
3902
  msg = ("The %s will operate on %d %s.\n%s"
3903
         "Do you want to continue?" % (text, count, list_type, extra))
3904
  affected = (("\nAffected %s:\n" % list_type) +
3905
              "\n".join(["  %s" % name for name in names]))
3906

    
3907
  choices = [("y", True, "Yes, execute the %s" % text),
3908
             ("n", False, "No, abort the %s" % text)]
3909

    
3910
  if count > 20:
3911
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3912
    question = msg
3913
  else:
3914
    question = msg + affected
3915

    
3916
  choice = AskUser(question, choices)
3917
  if choice == "v":
3918
    choices.pop(1)
3919
    choice = AskUser(msg + affected, choices)
3920
  return choice
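# Usage sketch: with more than 20 names the full list is only shown on
# request via the additional "v" (view) choice.
#
#   if not ConfirmOperation(["node1", "node2"], "nodes", "shutdown"):
#     return constants.EXIT_FAILURE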
3921

    
3922

    
3923
def _MaybeParseUnit(elements):
3924
  """Parses and returns an array of potential values with units.
3925

3926
  """
3927
  parsed = {}
3928
  for k, v in elements.items():
3929
    if v == constants.VALUE_DEFAULT:
3930
      parsed[k] = v
3931
    else:
3932
      parsed[k] = utils.ParseUnit(v)
3933
  return parsed
3934

    
3935

    
3936
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3937
                             ispecs_disk_count, ispecs_disk_size,
3938
                             ispecs_nic_count, group_ipolicy, fill_all):
3939
  try:
3940
    if ispecs_mem_size:
3941
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3942
    if ispecs_disk_size:
3943
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3944
  except (TypeError, ValueError, errors.UnitParseError), err:
3945
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3946
                               " in policy: %s" %
3947
                               (ispecs_disk_size, ispecs_mem_size, err),
3948
                               errors.ECODE_INVAL)
3949

    
3950
  # prepare ipolicy dict
3951
  ispecs_transposed = {
3952
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3953
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3954
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3955
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3956
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3957
    }
3958

    
3959
  # first, check that the values given are correct
3960
  if group_ipolicy:
3961
    forced_type = TISPECS_GROUP_TYPES
3962
  else:
3963
    forced_type = TISPECS_CLUSTER_TYPES
3964
  for specs in ispecs_transposed.values():
3965
    assert type(specs) is dict
3966
    utils.ForceDictType(specs, forced_type)
3967

    
3968
  # then transpose
3969
  ispecs = {
3970
    constants.ISPECS_MIN: {},
3971
    constants.ISPECS_MAX: {},
3972
    constants.ISPECS_STD: {},
3973
    }
3974
  for (name, specs) in ispecs_transposed.iteritems():
3975
    assert name in constants.ISPECS_PARAMETERS
3976
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3977
      assert key in ispecs
3978
      ispecs[key][name] = val
3979
  minmax_out = {}
3980
  for key in constants.ISPECS_MINMAX_KEYS:
3981
    if fill_all:
3982
      minmax_out[key] = \
3983
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3984
    else:
3985
      minmax_out[key] = ispecs[key]
3986
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3987
  if fill_all:
3988
    ipolicy[constants.ISPECS_STD] = \
3989
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3990
                         ispecs[constants.ISPECS_STD])
3991
  else:
3992
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3993

    
3994

    
3995
def _ParseSpecUnit(spec, keyname):
3996
  ret = spec.copy()
3997
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3998
    if k in ret:
3999
      try:
4000
        ret[k] = utils.ParseUnit(ret[k])
4001
      except (TypeError, ValueError, errors.UnitParseError), err:
4002
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
4003
                                    " specs: %s" % (k, ret[k], keyname, err)),
4004
                                   errors.ECODE_INVAL)
4005
  return ret
4006

    
4007

    
4008
def _ParseISpec(spec, keyname, required):
4009
  ret = _ParseSpecUnit(spec, keyname)
4010
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
4011
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
4012
  if required and missing:
4013
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
4014
                               (keyname, utils.CommaJoin(missing)),
4015
                               errors.ECODE_INVAL)
4016
  return ret
4017

    
4018

    
4019
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4020
  ret = None
4021
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
4022
      len(minmax_ispecs[0]) == 1):
4023
    for (key, spec) in minmax_ispecs[0].items():
4024
      # This loop is executed exactly once
4025
      if key in allowed_values and not spec:
4026
        ret = key
4027
  return ret
4028

    
4029

    
4030
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4031
                            group_ipolicy, allowed_values):
4032
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4033
  if found_allowed is not None:
4034
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4035
  elif minmax_ispecs is not None:
4036
    minmax_out = []
4037
    for mmpair in minmax_ispecs:
4038
      mmpair_out = {}
4039
      for (key, spec) in mmpair.items():
4040
        if key not in constants.ISPECS_MINMAX_KEYS:
4041
          msg = "Invalid key in bounds instance specifications: %s" % key
4042
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4043
        mmpair_out[key] = _ParseISpec(spec, key, True)
4044
      minmax_out.append(mmpair_out)
4045
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4046
  if std_ispecs is not None:
4047
    assert not group_ipolicy # This is not an option for gnt-group
4048
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4049

    
4050

    
4051
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4052
                          ispecs_cpu_count=None,
4053
                          ispecs_disk_count=None,
4054
                          ispecs_disk_size=None,
4055
                          ispecs_nic_count=None,
4056
                          minmax_ispecs=None,
4057
                          std_ispecs=None,
4058
                          ipolicy_disk_templates=None,
4059
                          ipolicy_vcpu_ratio=None,
4060
                          ipolicy_spindle_ratio=None,
4061
                          group_ipolicy=False,
4062
                          allowed_values=None,
4063
                          fill_all=False):
4064
  """Creation of instance policy based on command line options.
4065

4066
  @param fill_all: whether for cluster policies we should ensure that
4067
    all values are filled
4068

4069
  """
4070
  assert not (fill_all and allowed_values)
4071

    
4072
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4073
                 ispecs_disk_size or ispecs_nic_count)
4074
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4075
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4076
                               " together with any --ipolicy-xxx-specs option",
4077
                               errors.ECODE_INVAL)
4078

    
4079
  ipolicy_out = objects.MakeEmptyIPolicy()
4080
  if split_specs:
4081
    assert fill_all
4082
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4083
                             ispecs_disk_count, ispecs_disk_size,
4084
                             ispecs_nic_count, group_ipolicy, fill_all)
4085
  elif (minmax_ispecs is not None or std_ispecs is not None):
4086
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4087
                            group_ipolicy, allowed_values)
4088

    
4089
  if ipolicy_disk_templates is not None:
4090
    if allowed_values and ipolicy_disk_templates in allowed_values:
4091
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4092
    else:
4093
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4094
  if ipolicy_vcpu_ratio is not None:
4095
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4096
  if ipolicy_spindle_ratio is not None:
4097
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4098

    
4099
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4100

    
4101
  if not group_ipolicy and fill_all:
4102
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4103

    
4104
  return ipolicy_out
4105

    
4106

    
4107
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4108
  """Formatting core of L{PrintGenericInfo}.
4109

4110
  @param buf: (string) stream to accumulate the result into
4111
  @param data: data to format
4112
  @type level: int
4113
  @param level: depth in the data hierarchy, used for indenting
4114
  @type afterkey: bool
4115
  @param afterkey: True when we are in the middle of a line after a key (used
4116
      to properly add newlines or indentation)
4117

4118
  """
4119
  baseind = "  "
4120
  if isinstance(data, dict):
4121
    if not data:
4122
      buf.write("\n")
4123
    else:
4124
      if afterkey:
4125
        buf.write("\n")
4126
        doindent = True
4127
      else:
4128
        doindent = False
4129
      for key in sorted(data):
4130
        if doindent:
4131
          buf.write(baseind * level)
4132
        else:
4133
          doindent = True
4134
        buf.write(key)
4135
        buf.write(": ")
4136
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4137
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4138
    # list of tuples (an ordered dictionary)
4139
    if afterkey:
4140
      buf.write("\n")
4141
      doindent = True
4142
    else:
4143
      doindent = False
4144
    for (key, val) in data:
4145
      if doindent:
4146
        buf.write(baseind * level)
4147
      else:
4148
        doindent = True
4149
      buf.write(key)
4150
      buf.write(": ")
4151
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4152
  elif isinstance(data, list):
4153
    if not data:
4154
      buf.write("\n")
4155
    else:
4156
      if afterkey:
4157
        buf.write("\n")
4158
        doindent = True
4159
      else:
4160
        doindent = False
4161
      for item in data:
4162
        if doindent:
4163
          buf.write(baseind * level)
4164
        else:
4165
          doindent = True
4166
        buf.write("-")
4167
        buf.write(baseind[1:])
4168
        _SerializeGenericInfo(buf, item, level + 1)
4169
  else:
4170
    # This branch should be only taken for strings, but it's practically
4171
    # impossible to guarantee that no other types are produced somewhere
4172
    buf.write(str(data))
4173
    buf.write("\n")
4174

    
4175

    
4176
def PrintGenericInfo(data):
4177
  """Print information formatted according to the hierarchy.
4178

4179
  The output is a valid YAML string.
4180

4181
  @param data: the data to print. It's a hierarchical structure whose elements
4182
      can be:
4183
        - dictionaries, where keys are strings and values are of any of the
4184
          types listed here
4185
        - lists of pairs (key, value), where key is a string and value is of
4186
          any of the types listed here; it's a way to encode ordered
4187
          dictionaries
4188
        - lists of any of the types listed here
4189
        - strings
4190

4191
  """
4192
  buf = StringIO()
4193
  _SerializeGenericInfo(buf, data, 0)
4194
  ToStdout(buf.getvalue().rstrip("\n"))
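# Usage sketch (illustrative data): nested dicts, lists of (key, value) pairs
# and plain lists all render as indented, YAML-like text.
#
#   PrintGenericInfo([
#     ("cluster", "example-cluster"),
#     ("nodes", ["node1.example.com", "node2.example.com"]),
#     ("parameters", {"candidate_pool_size": "10"}),
#     ])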