#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
import ganeti.rpc.errors as rpcerr
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer

from ganeti.runtime import (GetClient)

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_GLUSTER_FILEDIR_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HOTPLUG_IF_POSSIBLE_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_PARAMS_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "INSTANCE_COMMUNICATION_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_NODE_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OSPARAMS_PRIVATE_OPT",
  "OSPARAMS_SECRET_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RQL_OPT",
  "INSTANCE_COMMUNICATION_NETWORK_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
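
# Illustrative note (not part of the upstream module): for a command invoked
# with opts.tag_type == constants.TAG_NODE and args == ["node1.example.com"]
# (a hypothetical node name), the call pops the name and returns
# (constants.TAG_NODE, "node1.example.com"); for constants.TAG_CLUSTER it
# returns (kind, "") and leaves args untouched.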


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
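
# Illustrative note (not part of the upstream module): with opts.tags_source
# set to a file containing one tag per line, the stripped lines are appended
# to args; passing "-" reads the tags from stdin instead.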


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """optparse custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
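
# Illustrative note (assuming the usual behaviour of utils.ParseUnit, which
# returns mebibytes): a plain "512" stays 512, a suffixed "4g" becomes 4096,
# and unparseable input surfaces as an OptionValueError for optparse.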


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
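
# Illustrative note (not part of the upstream module): with
# parse_prefixes=True, a value such as "mem=512,no_auto_balance,-kernel_path"
# is parsed into {"mem": "512", "auto_balance": False, "kernel_path": None};
# a repeated key raises errors.ParameterError.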


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
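
# Illustrative note (not part of the upstream module):
# _SplitIdentKeyVal("-D", "drbd:resync-rate=100", True) returns
# ("drbd", {"resync-rate": "100"}), while a bare "no_drbd" returns
# ("drbd", False), the form used to drop a whole parameter group.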


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def check_key_private_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for private and secret key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return serializer.PrivateDict(_SplitKeyVal(opt, value, True))


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
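
# Illustrative note (not part of the upstream module): a value such as
# "disk0:size=1024/disk1:size=512//disk0:size=2048" is parsed into
# [{"disk0": {"size": "1024"}, "disk1": {"size": "512"}},
#  {"disk0": {"size": "2048"}}]; "//" separates the outer list entries and
# "/" separates the ident:key=val groups within each entry.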


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
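
# Illustrative note (assuming constants.VALUE_TRUE/VALUE_FALSE are the strings
# "true"/"false"): the input is lowercased first, so "Yes", "yes" and "true"
# all map to True, "NO" and "false" map to False, and anything else raises
# errors.ParameterError.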


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "keyprivateval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["keyprivateval"] = check_key_private_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
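
# Illustrative sketch (hypothetical option, not part of the upstream module):
# the custom types registered above let the definitions below declare
# structured values, e.g.
#
#   EXAMPLE_PARAMS_OPT = cli_option("--example-params", dest="example_params",
#                                   type="keyval", default={},
#                                   help="Example key=value parameters")
#
# so that "--example-params mem=512,no_auto_balance" arrives in the parsed
# options as {"mem": "512", "auto_balance": False}.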


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
                                           dest="default_iallocator_params",
                                           help="iallocator template"
                                           " parameters, in the format"
                                           " template:option=value,"
                                           " option=value,...",
                                           type="keyval",
                                           default={})

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

OSPARAMS_PRIVATE_OPT = cli_option("--os-parameters-private",
                                  dest="osparams_private",
                                  type="keyprivateval",
                                  default=serializer.PrivateDict(),
                                  help="Private OS parameters"
                                       " (won't be logged)")

OSPARAMS_SECRET_OPT = cli_option("--os-parameters-secret",
                                 dest="osparams_secret",
                                 type="keyprivateval",
                                 default=serializer.PrivateDict(),
                                 help="Secret OS parameters (won't be logged or"
                                      " saved; you must supply these for every"
                                      " operation.)")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
                     type="int", help="Set the maximal number of jobs to "
                                      "run simultaneously")

INSTANCE_COMMUNICATION_NETWORK_OPT = \
    cli_option("--instance-communication-network",
               dest="instance_communication_network",
               type="string",
               help="Set the network name for instance communication")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
  "--gluster-storage-dir",
  dest="gluster_storage_dir",
  help="Specify the default directory (cluster-wide) for mounting Gluster"
  " file systems [%s]" %
  pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
  metavar="GLUSTERDIR",
  default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)
1375

    
1376
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1377
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1378
                                   action="store_false", default=True)
1379

    
1380
MODIFY_ETCHOSTS_OPT = \
1381
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1382
            default=None, type="bool",
1383
            help="Defines whether the cluster should autonomously modify"
1384
            " and keep in sync the /etc/hosts file of the nodes")
1385

    
1386
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1387
                                    help="Don't initialize SSH keys",
1388
                                    action="store_false", default=True)
1389

    
1390
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1391
                             help="Enable parseable error messages",
1392
                             action="store_true", default=False)
1393

    
1394
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1395
                          help="Skip N+1 memory redundancy tests",
1396
                          action="store_true", default=False)
1397

    
1398
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1399
                             help="Type of reboot: soft/hard/full",
1400
                             default=constants.INSTANCE_REBOOT_HARD,
1401
                             metavar="<REBOOT>",
1402
                             choices=list(constants.REBOOT_TYPES))
1403

    
1404
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1405
                                    dest="ignore_secondaries",
1406
                                    default=False, action="store_true",
1407
                                    help="Ignore errors from secondaries")
1408

    
1409
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shut down the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          default=constants.IEC_NONE,
                          help="The compression mode to use",
                          choices=list(constants.IEC_ALL))

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

NEW_NODE_CERT_OPT = cli_option(
  "--new-node-certificates", dest="new_node_cert", default=False,
  action="store_true", help="Generate new node certificates (for all nodes)")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices if"
                                          " hotplug is supported")

INSTANCE_COMMUNICATION_OPT = \
    cli_option("-c", "--communication",
               dest="instance_communication",
               help=constants.INSTANCE_COMMUNICATION_DOC,
               type="bool")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OSPARAMS_PRIVATE_OPT,
  OSPARAMS_SECRET_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
1789
  """Exception class for L{_ParseArgs}.
1790

1791
  """
1792
  def __init__(self, exit_error):
1793
    """Initializes instances of this class.
1794

1795
    @type exit_error: bool
1796
    @param exit_error: Whether to report failure on exit
1797

1798
    """
1799
    Exception.__init__(self)
1800
    self.exit_error = exit_error
1801

    
1802

    
1803
class _ShowVersion(Exception):
1804
  """Exception class for L{_ParseArgs}.
1805

1806
  """
1807

    
1808

    
1809
def _ParseArgs(binary, argv, commands, aliases, env_override):
1810
  """Parser for the command line arguments.
1811

1812
  This function parses the arguments and returns the function which
1813
  must be executed together with its (modified) arguments.
1814

1815
  @param binary: Script name
1816
  @param argv: Command line arguments
1817
  @param commands: Dictionary containing command definitions
1818
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1819
  @param env_override: list of env variables allowed for default args
1820
  @raise _ShowUsage: If usage description should be shown
1821
  @raise _ShowVersion: If version should be shown
1822

1823
  """
1824
  assert not (env_override - set(commands))
1825
  assert not (set(aliases.keys()) & set(commands.keys()))
1826

    
1827
  if len(argv) > 1:
1828
    cmd = argv[1]
1829
  else:
1830
    # No option or command given
1831
    raise _ShowUsage(exit_error=True)
1832

    
1833
  if cmd == "--version":
1834
    raise _ShowVersion()
1835
  elif cmd == "--help":
1836
    raise _ShowUsage(exit_error=False)
1837
  elif not (cmd in commands or cmd in aliases):
1838
    raise _ShowUsage(exit_error=True)
1839

    
1840
  # get command, unalias it, and look it up in commands
1841
  if cmd in aliases:
1842
    if aliases[cmd] not in commands:
1843
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1844
                                   " command '%s'" % (cmd, aliases[cmd]))
1845

    
1846
    cmd = aliases[cmd]
1847

    
1848
  if cmd in env_override:
1849
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1850
    env_args = os.environ.get(args_env_name)
1851
    if env_args:
1852
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1853

    
1854
  func, args_def, parser_opts, usage, description = commands[cmd]
1855
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1856
                        description=description,
1857
                        formatter=TitledHelpFormatter(),
1858
                        usage="%%prog %s %s" % (cmd, usage))
1859
  parser.disable_interspersed_args()
1860
  options, args = parser.parse_args(args=argv[2:])
1861

    
1862
  if not _CheckArguments(cmd, args_def, args):
1863
    return None, None, None
1864

    
1865
  return func, options, args
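
# Illustrative shape of a "commands" entry as unpacked above (the command
# name, callback and argument specification are made-up examples):
#   commands = {
#     "list": (ListFunc, [ArgInstance()], [FIELDS_OPT],
#              "[<instance>...]", "Lists the instances"),
#     }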
1866

    
1867

    
1868
def _FormatUsage(binary, commands):
1869
  """Generates a nice description of all commands.
1870

1871
  @param binary: Script name
1872
  @param commands: Dictionary containing command definitions
1873

1874
  """
1875
  # compute the max line length for cmd + usage
1876
  mlen = min(60, max(map(len, commands)))
1877

    
1878
  yield "Usage: %s {command} [options...] [argument...]" % binary
1879
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1880
  yield ""
1881
  yield "Commands:"
1882

    
1883
  # and format a nice command list
1884
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1885
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1886
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1887
    for line in help_lines:
1888
      yield " %-*s   %s" % (mlen, "", line)
1889

    
1890
  yield ""
1891

    
1892

    
1893
def _CheckArguments(cmd, args_def, args):
1894
  """Verifies the arguments using the argument definition.
1895

1896
  Algorithm:
1897

1898
    1. Abort with error if values specified by user but none expected.
1899

1900
    1. For each argument in definition
1901

1902
      1. Keep running count of minimum number of values (min_count)
1903
      1. Keep running count of maximum number of values (max_count)
1904
      1. If it has an unlimited number of values
1905

1906
        1. Abort with error if it's not the last argument in the definition
1907

1908
    1. If last argument has limited number of values
1909

1910
      1. Abort with error if number of values doesn't match or is too large
1911

1912
    1. Abort with error if user didn't pass enough values (min_count)
1913

1914
  """
1915
  if args and not args_def:
1916
    ToStderr("Error: Command %s expects no arguments", cmd)
1917
    return False
1918

    
1919
  min_count = None
1920
  max_count = None
1921
  check_max = None
1922

    
1923
  last_idx = len(args_def) - 1
1924

    
1925
  for idx, arg in enumerate(args_def):
1926
    if min_count is None:
1927
      min_count = arg.min
1928
    elif arg.min is not None:
1929
      min_count += arg.min
1930

    
1931
    if max_count is None:
1932
      max_count = arg.max
1933
    elif arg.max is not None:
1934
      max_count += arg.max
1935

    
1936
    if idx == last_idx:
1937
      check_max = (arg.max is not None)
1938

    
1939
    elif arg.max is None:
1940
      raise errors.ProgrammerError("Only the last argument can have max=None")
1941

    
1942
  if check_max:
1943
    # Command with exact number of arguments
1944
    if (min_count is not None and max_count is not None and
1945
        min_count == max_count and len(args) != min_count):
1946
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1947
      return False
1948

    
1949
    # Command with limited number of arguments
1950
    if max_count is not None and len(args) > max_count:
1951
      ToStderr("Error: Command %s expects only %d argument(s)",
1952
               cmd, max_count)
1953
      return False
1954

    
1955
  # Command with some required arguments
1956
  if min_count is not None and len(args) < min_count:
1957
    ToStderr("Error: Command %s expects at least %d argument(s)",
1958
             cmd, min_count)
1959
    return False
1960

    
1961
  return True
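
# Illustrative reading of the checks above (the argument spec is an example):
# a definition like [ArgInstance(min=1, max=1)] makes the command require
# exactly one value, while max=None on the last argument allows an unlimited
# number of trailing values.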
1962

    
1963

    
1964
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
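
# Illustrative behaviour (node names are made up; note the list vs. tuple
# asymmetry of the two branches above):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") => ("node1.example.com", None)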


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
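
# Illustrative result (OS and variant names are made up):
#   CalculateOSNames("debootstrap", ["buster", "bullseye"])
#     => ["debootstrap+buster", "debootstrap+bullseye"]
#   CalculateOSNames("debootstrap", None) => ["debootstrap"]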


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
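
# Illustrative behaviour (field names are examples only):
#   ParseFields(None, ["name", "status"]) => ["name", "status"]
#   ParseFields("+os", ["name", "status"]) => ["name", "status", "os"]
#   ParseFields("name,os", ["name", "status"]) => ["name", "os"]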
2007

    
2008

    
2009
UsesRPC = rpc.RunWithRPC
2010

    
2011

    
2012
def AskUser(text, choices=None):
2013
  """Ask the user a question.
2014

2015
  @param text: the question to ask
2016

2017
  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list
2025

2026
  """
2027
  if choices is None:
2028
    choices = [("y", True, "Perform the operation"),
2029
               ("n", False, "Do not perform the operation")]
2030
  if not choices or not isinstance(choices, list):
2031
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
2032
  for entry in choices:
2033
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
2034
      raise errors.ProgrammerError("Invalid choices element to AskUser")
2035

    
2036
  answer = choices[-1][1]
2037
  new_text = []
2038
  for line in text.splitlines():
2039
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
2040
  text = "\n".join(new_text)
2041
  try:
2042
    f = file("/dev/tty", "a+")
2043
  except IOError:
2044
    return answer
2045
  try:
2046
    chars = [entry[0] for entry in choices]
2047
    chars[-1] = "[%s]" % chars[-1]
2048
    chars.append("?")
2049
    maps = dict([(entry[0], entry[1]) for entry in choices])
2050
    while True:
2051
      f.write(text)
2052
      f.write("\n")
2053
      f.write("/".join(chars))
2054
      f.write(": ")
2055
      line = f.readline(2).strip().lower()
2056
      if line in maps:
2057
        answer = maps[line]
2058
        break
2059
      elif line == "?":
2060
        for entry in choices:
2061
          f.write(" %s - %s\n" % (entry[0], entry[2]))
2062
        f.write("\n")
2063
        continue
2064
  finally:
2065
    f.close()
2066
  return answer
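
# Illustrative use (the question text is an example); with the default
# choices the call returns True only when the operator answers "y":
#   if not AskUser("Continue with the operation?"):
#     return constants.EXIT_FAILURE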
2067

    
2068

    
2069
class JobSubmittedException(Exception):
2070
  """Job was submitted, client should exit.
2071

2072
  This exception has one argument, the ID of the job that was
2073
  submitted. The handler should print this ID.
2074

2075
  This is not an error, just a structured way to exit from clients.
2076

2077
  """
2078

    
2079

    
2080
def SendJob(ops, cl=None):
2081
  """Function to submit an opcode without waiting for the results.
2082

2083
  @type ops: list
2084
  @param ops: list of opcodes
2085
  @type cl: luxi.Client
2086
  @param cl: the luxi client to use for communicating with the master;
2087
             if None, a new client will be created
2088

2089
  """
2090
  if cl is None:
2091
    cl = GetClient()
2092

    
2093
  job_id = cl.SubmitJob(ops)
2094

    
2095
  return job_id
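
# Illustrative fire-and-forget submission (the opcode is only an example):
#   job_id = SendJob([opcodes.OpClusterVerify()])
#   ToStdout("%d" % job_id)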
2096

    
2097

    
2098
def GenericPollJob(job_id, cbs, report_cbs):
2099
  """Generic job-polling function.
2100

2101
  @type job_id: number
2102
  @param job_id: Job ID
2103
  @type cbs: Instance of L{JobPollCbBase}
2104
  @param cbs: Data callbacks
2105
  @type report_cbs: Instance of L{JobPollReportCbBase}
2106
  @param report_cbs: Reporting callbacks
2107

2108
  """
2109
  prev_job_info = None
2110
  prev_logmsg_serial = None
2111

    
2112
  status = None
2113

    
2114
  while True:
2115
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2116
                                      prev_logmsg_serial)
2117
    if not result:
2118
      # job not found, go away!
2119
      raise errors.JobLost("Job with id %s lost" % job_id)
2120

    
2121
    if result == constants.JOB_NOTCHANGED:
2122
      report_cbs.ReportNotChanged(job_id, status)
2123

    
2124
      # Wait again
2125
      continue
2126

    
2127
    # Split result, a tuple of (field values, log entries)
2128
    (job_info, log_entries) = result
2129
    (status, ) = job_info
2130

    
2131
    if log_entries:
2132
      for log_entry in log_entries:
2133
        (serial, timestamp, log_type, message) = log_entry
2134
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2135
                                    log_type, message)
2136
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2137

    
2138
    # TODO: Handle canceled and archived jobs
2139
    elif status in (constants.JOB_STATUS_SUCCESS,
2140
                    constants.JOB_STATUS_ERROR,
2141
                    constants.JOB_STATUS_CANCELING,
2142
                    constants.JOB_STATUS_CANCELED):
2143
      break
2144

    
2145
    prev_job_info = job_info
2146

    
2147
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2148
  if not jobs:
2149
    raise errors.JobLost("Job with id %s lost" % job_id)
2150

    
2151
  status, opstatus, result = jobs[0]
2152

    
2153
  if status == constants.JOB_STATUS_SUCCESS:
2154
    return result
2155

    
2156
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2157
    raise errors.OpExecError("Job was canceled")
2158

    
2159
  has_ok = False
2160
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2161
    if status == constants.OP_STATUS_SUCCESS:
2162
      has_ok = True
2163
    elif status == constants.OP_STATUS_ERROR:
2164
      errors.MaybeRaise(msg)
2165

    
2166
      if has_ok:
2167
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2168
                                 (idx, msg))
2169

    
2170
      raise errors.OpExecError(str(msg))
2171

    
2172
  # default failure mode
2173
  raise errors.OpExecError(result)
2174

    
2175

    
2176
class JobPollCbBase:
2177
  """Base class for L{GenericPollJob} callbacks.
2178

2179
  """
2180
  def __init__(self):
2181
    """Initializes this class.
2182

2183
    """
2184

    
2185
  def WaitForJobChangeOnce(self, job_id, fields,
2186
                           prev_job_info, prev_log_serial):
2187
    """Waits for changes on a job.
2188

2189
    """
2190
    raise NotImplementedError()
2191

    
2192
  def QueryJobs(self, job_ids, fields):
2193
    """Returns the selected fields for the selected job IDs.
2194

2195
    @type job_ids: list of numbers
2196
    @param job_ids: Job IDs
2197
    @type fields: list of strings
2198
    @param fields: Fields
2199

2200
    """
2201
    raise NotImplementedError()
2202

    
2203

    
2204
class JobPollReportCbBase:
2205
  """Base class for L{GenericPollJob} reporting callbacks.
2206

2207
  """
2208
  def __init__(self):
2209
    """Initializes this class.
2210

2211
    """
2212

    
2213
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2214
    """Handles a log message.
2215

2216
    """
2217
    raise NotImplementedError()
2218

    
2219
  def ReportNotChanged(self, job_id, status):
2220
    """Called for if a job hasn't changed in a while.
2221

2222
    @type job_id: number
2223
    @param job_id: Job ID
2224
    @type status: string or None
2225
    @param status: Job status if available
2226

2227
    """
2228
    raise NotImplementedError()
2229

    
2230

    
2231
class _LuxiJobPollCb(JobPollCbBase):
2232
  def __init__(self, cl):
2233
    """Initializes this class.
2234

2235
    """
2236
    JobPollCbBase.__init__(self)
2237
    self.cl = cl
2238

    
2239
  def WaitForJobChangeOnce(self, job_id, fields,
2240
                           prev_job_info, prev_log_serial):
2241
    """Waits for changes on a job.
2242

2243
    """
2244
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2245
                                        prev_job_info, prev_log_serial)
2246

    
2247
  def QueryJobs(self, job_ids, fields):
2248
    """Returns the selected fields for the selected job IDs.
2249

2250
    """
2251
    return self.cl.QueryJobs(job_ids, fields)
2252

    
2253

    
2254
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2255
  def __init__(self, feedback_fn):
2256
    """Initializes this class.
2257

2258
    """
2259
    JobPollReportCbBase.__init__(self)
2260

    
2261
    self.feedback_fn = feedback_fn
2262

    
2263
    assert callable(feedback_fn)
2264

    
2265
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2266
    """Handles a log message.
2267

2268
    """
2269
    self.feedback_fn((timestamp, log_type, log_msg))
2270

    
2271
  def ReportNotChanged(self, job_id, status):
2272
    """Called if a job hasn't changed in a while.
2273

2274
    """
2275
    # Ignore
2276

    
2277

    
2278
class StdioJobPollReportCb(JobPollReportCbBase):
2279
  def __init__(self):
2280
    """Initializes this class.
2281

2282
    """
2283
    JobPollReportCbBase.__init__(self)
2284

    
2285
    self.notified_queued = False
2286
    self.notified_waitlock = False
2287

    
2288
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2289
    """Handles a log message.
2290

2291
    """
2292
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2293
             FormatLogMessage(log_type, log_msg))
2294

    
2295
  def ReportNotChanged(self, job_id, status):
2296
    """Called if a job hasn't changed in a while.
2297

2298
    """
2299
    if status is None:
2300
      return
2301

    
2302
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2303
      ToStderr("Job %s is waiting in queue", job_id)
2304
      self.notified_queued = True
2305

    
2306
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2307
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2308
      self.notified_waitlock = True
2309

    
2310

    
2311
def FormatLogMessage(log_type, log_msg):
2312
  """Formats a job message according to its type.
2313

2314
  """
2315
  if log_type != constants.ELOG_MESSAGE:
2316
    log_msg = str(log_msg)
2317

    
2318
  return utils.SafeEncode(log_msg)
2319

    
2320

    
2321
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2322
  """Function to poll for the result of a job.
2323

2324
  @type job_id: job identified
2325
  @param job_id: the job to poll for results
2326
  @type cl: luxi.Client
2327
  @param cl: the luxi client to use for communicating with the master;
2328
             if None, a new client will be created
2329

2330
  """
2331
  if cl is None:
2332
    cl = GetClient()
2333

    
2334
  if reporter is None:
2335
    if feedback_fn:
2336
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2337
    else:
2338
      reporter = StdioJobPollReportCb()
2339
  elif feedback_fn:
2340
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2341

    
2342
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
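
# Illustrative use (job_id as returned by SendJob above); progress is written
# to stdout via StdioJobPollReportCb unless a feedback_fn or reporter is given:
#   results = PollJob(job_id)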
2343

    
2344

    
2345
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2346
  """Legacy function to submit an opcode.
2347

2348
  This is just a simple wrapper over the construction of the processor
2349
  instance. It should be extended to better handle feedback and
2350
  interaction functions.
2351

2352
  """
2353
  if cl is None:
2354
    cl = GetClient()
2355

    
2356
  SetGenericOpcodeOpts([op], opts)
2357

    
2358
  job_id = SendJob([op], cl=cl)
2359
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2360
    ToStdout("%d" % job_id)
2361

    
2362
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2363
                       reporter=reporter)
2364

    
2365
  return op_results[0]
2366

    
2367

    
2368
def SubmitOpCodeToDrainedQueue(op):
2369
  """Forcefully insert a job in the queue, even if it is drained.
2370

2371
  """
2372
  cl = GetClient()
2373
  job_id = cl.SubmitJobToDrainedQueue([op])
2374
  op_results = PollJob(job_id, cl=cl)
2375
  return op_results[0]
2376

    
2377

    
2378
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2379
  """Wrapper around SubmitOpCode or SendJob.
2380

2381
  This function will decide, based on the 'opts' parameter, whether to
2382
  submit and wait for the result of the opcode (and return it), or
2383
  whether to just send the job and print its identifier. It is used in
2384
  order to simplify the implementation of the '--submit' option.
2385

2386
  It will also process the opcodes if we're sending them via SendJob
2387
  (otherwise SubmitOpCode does it).
2388

2389
  """
2390
  if opts and opts.submit_only:
2391
    job = [op]
2392
    SetGenericOpcodeOpts(job, opts)
2393
    job_id = SendJob(job, cl=cl)
2394
    if opts.print_jobid:
2395
      ToStdout("%d" % job_id)
2396
    raise JobSubmittedException(job_id)
2397
  else:
2398
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
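
# Illustrative use from a command implementation (the opcode is an example);
# with --submit this raises JobSubmittedException instead of waiting:
#   op = opcodes.OpInstanceStartup(instance_name=args[0])
#   SubmitOrSend(op, opts)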
2399

    
2400

    
2401
def _InitReasonTrail(op, opts):
2402
  """Builds the first part of the reason trail
2403

2404
  Builds the initial part of the reason trail, adding the user provided reason
2405
  (if it exists) and the name of the command starting the operation.
2406

2407
  @param op: the opcode the reason trail will be added to
2408
  @param opts: the command line options selected by the user
2409

2410
  """
2411
  assert len(sys.argv) >= 2
2412
  trail = []
2413

    
2414
  if opts.reason:
2415
    trail.append((constants.OPCODE_REASON_SRC_USER,
2416
                  opts.reason,
2417
                  utils.EpochNano()))
2418

    
2419
  binary = os.path.basename(sys.argv[0])
2420
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2421
  command = sys.argv[1]
2422
  trail.append((source, command, utils.EpochNano()))
2423
  op.reason = trail
2424

    
2425

    
2426
def SetGenericOpcodeOpts(opcode_list, options):
2427
  """Processor for generic options.
2428

2429
  This function updates the given opcodes based on generic command
2430
  line options (like debug, dry-run, etc.).
2431

2432
  @param opcode_list: list of opcodes
2433
  @param options: command line options or None
2434
  @return: None (in-place modification)
2435

2436
  """
2437
  if not options:
2438
    return
2439
  for op in opcode_list:
2440
    op.debug_level = options.debug
2441
    if hasattr(options, "dry_run"):
2442
      op.dry_run = options.dry_run
2443
    if getattr(options, "priority", None) is not None:
2444
      op.priority = options.priority
2445
    _InitReasonTrail(op, options)
2446

    
2447

    
2448
def FormatError(err):
2449
  """Return a formatted error message for a given error.
2450

2451
  This function takes an exception instance and returns a tuple
2452
  consisting of two values: first, the recommended exit code, and
2453
  second, a string describing the error message (not
2454
  newline-terminated).
2455

2456
  """
2457
  retcode = 1
2458
  obuf = StringIO()
2459
  msg = str(err)
2460
  if isinstance(err, errors.ConfigurationError):
2461
    txt = "Corrupt configuration file: %s" % msg
2462
    logging.error(txt)
2463
    obuf.write(txt + "\n")
2464
    obuf.write("Aborting.")
2465
    retcode = 2
2466
  elif isinstance(err, errors.HooksAbort):
2467
    obuf.write("Failure: hooks execution failed:\n")
2468
    for node, script, out in err.args[0]:
2469
      if out:
2470
        obuf.write("  node: %s, script: %s, output: %s\n" %
2471
                   (node, script, out))
2472
      else:
2473
        obuf.write("  node: %s, script: %s (no output)\n" %
2474
                   (node, script))
2475
  elif isinstance(err, errors.HooksFailure):
2476
    obuf.write("Failure: hooks general failure: %s" % msg)
2477
  elif isinstance(err, errors.ResolverError):
2478
    this_host = netutils.Hostname.GetSysName()
2479
    if err.args[0] == this_host:
2480
      msg = "Failure: can't resolve my own hostname ('%s')"
2481
    else:
2482
      msg = "Failure: can't resolve hostname '%s'"
2483
    obuf.write(msg % err.args[0])
2484
  elif isinstance(err, errors.OpPrereqError):
2485
    if len(err.args) == 2:
2486
      obuf.write("Failure: prerequisites not met for this"
2487
                 " operation:\nerror type: %s, error details:\n%s" %
2488
                 (err.args[1], err.args[0]))
2489
    else:
2490
      obuf.write("Failure: prerequisites not met for this"
2491
                 " operation:\n%s" % msg)
2492
  elif isinstance(err, errors.OpExecError):
2493
    obuf.write("Failure: command execution error:\n%s" % msg)
2494
  elif isinstance(err, errors.TagError):
2495
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2496
  elif isinstance(err, errors.JobQueueDrainError):
2497
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2498
               " accept new requests\n")
2499
  elif isinstance(err, errors.JobQueueFull):
2500
    obuf.write("Failure: the job queue is full and doesn't accept new"
2501
               " job submissions until old jobs are archived\n")
2502
  elif isinstance(err, errors.TypeEnforcementError):
2503
    obuf.write("Parameter Error: %s" % msg)
2504
  elif isinstance(err, errors.ParameterError):
2505
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2506
  elif isinstance(err, rpcerr.NoMasterError):
2507
    if err.args[0] == pathutils.MASTER_SOCKET:
2508
      daemon = "the master daemon"
2509
    elif err.args[0] == pathutils.QUERY_SOCKET:
2510
      daemon = "the config daemon"
2511
    else:
2512
      daemon = "socket '%s'" % str(err.args[0])
2513
    obuf.write("Cannot communicate with %s.\nIs the process running"
2514
               " and listening for connections?" % daemon)
2515
  elif isinstance(err, rpcerr.TimeoutError):
2516
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2517
               " been submitted and will continue to run even if the call"
2518
               " timed out. Useful commands in this situation are \"gnt-job"
2519
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2520
    obuf.write(msg)
2521
  elif isinstance(err, rpcerr.PermissionError):
2522
    obuf.write("It seems you don't have permissions to connect to the"
2523
               " master daemon.\nPlease retry as a different user.")
2524
  elif isinstance(err, rpcerr.ProtocolError):
2525
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2526
               "%s" % msg)
2527
  elif isinstance(err, errors.JobLost):
2528
    obuf.write("Error checking job status: %s" % msg)
2529
  elif isinstance(err, errors.QueryFilterParseError):
2530
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2531
    obuf.write("\n".join(err.GetDetails()))
2532
  elif isinstance(err, errors.GenericError):
2533
    obuf.write("Unhandled Ganeti error: %s" % msg)
2534
  elif isinstance(err, JobSubmittedException):
2535
    obuf.write("JobID: %s\n" % err.args[0])
2536
    retcode = 0
2537
  else:
2538
    obuf.write("Unhandled exception: %s" % msg)
2539
  return retcode, obuf.getvalue().rstrip("\n")
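
# Illustrative use (this mirrors the error handling in GenericMain below):
#   retcode, err_msg = FormatError(err)
#   ToStderr(err_msg)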
2540

    
2541

    
2542
def GenericMain(commands, override=None, aliases=None,
2543
                env_override=frozenset()):
2544
  """Generic main function for all the gnt-* commands.
2545

2546
  @param commands: a dictionary with a special structure, see the design doc
2547
                   for command line handling.
2548
  @param override: if not None, we expect a dictionary with keys that will
2549
                   override command line options; this can be used to pass
2550
                   options from the scripts to generic functions
2551
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2552
  @param env_override: list of environment names which are allowed to submit
2553
                       default args for commands
2554

2555
  """
2556
  # save the program name and the entire command line for later logging
2557
  if sys.argv:
2558
    binary = os.path.basename(sys.argv[0])
2559
    if not binary:
2560
      binary = sys.argv[0]
2561

    
2562
    if len(sys.argv) >= 2:
2563
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2564
    else:
2565
      logname = binary
2566

    
2567
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2568
  else:
2569
    binary = "<unknown program>"
2570
    cmdline = "<unknown>"
2571

    
2572
  if aliases is None:
2573
    aliases = {}
2574

    
2575
  try:
2576
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2577
                                       env_override)
2578
  except _ShowVersion:
2579
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2580
             constants.RELEASE_VERSION)
2581
    return constants.EXIT_SUCCESS
2582
  except _ShowUsage, err:
2583
    for line in _FormatUsage(binary, commands):
2584
      ToStdout(line)
2585

    
2586
    if err.exit_error:
2587
      return constants.EXIT_FAILURE
2588
    else:
2589
      return constants.EXIT_SUCCESS
2590
  except errors.ParameterError, err:
2591
    result, err_msg = FormatError(err)
2592
    ToStderr(err_msg)
2593
    return 1
2594

    
2595
  if func is None: # parse error
2596
    return 1
2597

    
2598
  if override is not None:
2599
    for key, val in override.iteritems():
2600
      setattr(options, key, val)
2601

    
2602
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2603
                     stderr_logging=True)
2604

    
2605
  logging.debug("Command line: %s", cmdline)
2606

    
2607
  try:
2608
    result = func(options, args)
2609
  except (errors.GenericError, rpcerr.ProtocolError,
2610
          JobSubmittedException), err:
2611
    result, err_msg = FormatError(err)
2612
    logging.exception("Error during command processing")
2613
    ToStderr(err_msg)
2614
  except KeyboardInterrupt:
2615
    result = constants.EXIT_FAILURE
2616
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2617
             " might have been submitted and"
2618
             " will continue to run in the background.")
2619
  except IOError, err:
2620
    if err.errno == errno.EPIPE:
2621
      # our terminal went away, we'll exit
2622
      sys.exit(constants.EXIT_FAILURE)
2623
    else:
2624
      raise
2625

    
2626
  return result
2627

    
2628

    
2629
def ParseNicOption(optvalue):
2630
  """Parses the value of the --net option(s).
2631

2632
  """
2633
  try:
2634
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2635
  except (TypeError, ValueError), err:
2636
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2637
                               errors.ECODE_INVAL)
2638

    
2639
  nics = [{}] * nic_max
2640
  for nidx, ndict in optvalue:
2641
    nidx = int(nidx)
2642

    
2643
    if not isinstance(ndict, dict):
2644
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2645
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2646

    
2647
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2648

    
2649
    nics[nidx] = ndict
2650

    
2651
  return nics
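
# Illustrative input/output (the NIC parameter name is an example); missing
# indexes are left as empty dicts:
#   ParseNicOption([("0", {"link": "br0"}), ("2", {})])
#     => [{"link": "br0"}, {}, {}]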
2652

    
2653

    
2654
def FixHvParams(hvparams):
2655
  # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
2656
  # comma to space because commas cannot be accepted on the command line
2657
  # (they already act as the separator between different hvparams). Still,
2658
  # RAPI should be able to accept commas for backwards compatibility.
2659
  # Therefore, we convert spaces into commas here, and we keep the old
2660
  # parsing logic everywhere else.
2661
  try:
2662
    new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2663
    hvparams[constants.HV_USB_DEVICES] = new_usb_devices
2664
  except KeyError:
2665
    # No usb_devices, no modification required
2666
    pass
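
# Illustrative effect (device IDs are made up; assumes
# constants.HV_USB_DEVICES == "usb_devices"):
#   hv = {"usb_devices": "062a:0001 045e:0040"}
#   FixHvParams(hv)   # hv["usb_devices"] is now "062a:0001,045e:0040"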
2667

    
2668

    
2669
def GenericInstanceCreate(mode, opts, args):
2670
  """Add an instance to the cluster via either creation or import.
2671

2672
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2673
  @param opts: the command line options selected by the user
2674
  @type args: list
2675
  @param args: should contain only one element, the new instance name
2676
  @rtype: int
2677
  @return: the desired exit code
2678

2679
  """
2680
  instance = args[0]
2681

    
2682
  (pnode, snode) = SplitNodeOption(opts.node)
2683

    
2684
  hypervisor = None
2685
  hvparams = {}
2686
  if opts.hypervisor:
2687
    hypervisor, hvparams = opts.hypervisor
2688

    
2689
  if opts.nics:
2690
    nics = ParseNicOption(opts.nics)
2691
  elif opts.no_nics:
2692
    # no nics
2693
    nics = []
2694
  elif mode == constants.INSTANCE_CREATE:
2695
    # default of one nic, all auto
2696
    nics = [{}]
2697
  else:
2698
    # mode == import
2699
    nics = []
2700

    
2701
  if opts.disk_template == constants.DT_DISKLESS:
2702
    if opts.disks or opts.sd_size is not None:
2703
      raise errors.OpPrereqError("Diskless instance but disk"
2704
                                 " information passed", errors.ECODE_INVAL)
2705
    disks = []
2706
  else:
2707
    if (not opts.disks and not opts.sd_size
2708
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      if constants.IDISK_SPINDLES in ddict:
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])

      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  FixHvParams(hvparams)

  osparams_private = opts.osparams_private or serializer.PrivateDict()
  osparams_secret = opts.osparams_secret or serializer.PrivateDict()

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
    compress = constants.IEC_NONE
    if opts.instance_communication is None:
      instance_communication = False
    else:
      instance_communication = opts.instance_communication
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
    compress = opts.compress
    instance_communication = False
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                osparams_private=osparams_private,
                                osparams_secret=osparams_secret,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                compress=compress,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy,
                                instance_communication=instance_communication)

  SubmitOrSend(op, opts)
  return 0


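# Illustrative sketch only (not part of the original interface): the
# "opts.disks" value consumed by the --disk parsing above is a list of
# (index, parameters) pairs, e.g.
#
#   opts.disks = [(0, {constants.IDISK_SIZE: "10G"}),
#                 (1, {constants.IDISK_SIZE: "512M",
#                      constants.IDISK_SPINDLES: "1"})]
#
# which the loop above normalises (via utils.ParseUnit) into
#
#   disks = [{constants.IDISK_SIZE: 10240},
#            {constants.IDISK_SIZE: 512, constants.IDISK_SPINDLES: 1}]

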
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd),
                            port=self.ssh_ports[node_name])

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)
  ssh_ports = GetNodesSshPorts(online_nodes, cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes, ssh_ports).Call(fn, *args)


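# Illustrative usage sketch (not part of the original module; names below are
# placeholders): callers pass a function that receives the helper instance as
# its first argument, because Call() invokes fn(self, *args):
#
#   def _Renew(helper):
#     # executed while all Ganeti daemons are stopped; the helper can be used
#     # to run commands on the (stopped) nodes via its _RunCmd() method
#     return 0
#
#   RunWhileClusterStopped(ToStdout, _Renew)

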
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
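

# Illustrative sketch (made-up data, approximate spacing): with the default
# "smart" layout (separator=None) numeric fields are right-aligned and unit
# fields are rendered with utils.FormatUnit, e.g.
#
#   GenerateTable(headers={"name": "Node", "dfree": "DFree"},
#                 fields=["name", "dfree"], separator=None,
#                 data=[["node1.example.com", 102400]],
#                 numfields=["dfree"], unitfields=["dfree"])
#
# returns roughly:
#
#   ["Node              DFree",
#    "node1.example.com 100.0G"]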
3066

    
3067

    
3068
def _FormatBool(value):
3069
  """Formats a boolean value as a string.
3070

3071
  """
3072
  if value:
3073
    return "Y"
3074
  return "N"
3075

    
3076

    
3077
#: Default formatting for query results; (callback, align right)
3078
_DEFAULT_FORMAT_QUERY = {
3079
  constants.QFT_TEXT: (str, False),
3080
  constants.QFT_BOOL: (_FormatBool, False),
3081
  constants.QFT_NUMBER: (str, True),
3082
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3083
  constants.QFT_OTHER: (str, False),
3084
  constants.QFT_UNKNOWN: (str, False),
3085
  }
3086

    
3087

    
3088
def _GetColumnFormatter(fdef, override, unit):
3089
  """Returns formatting function for a field.
3090

3091
  @type fdef: L{objects.QueryFieldDefinition}
3092
  @type override: dict
3093
  @param override: Dictionary for overriding field formatting functions,
3094
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3095
  @type unit: string
3096
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3097
  @rtype: tuple; (callable, bool)
3098
  @return: Returns the function to format a value (takes one parameter) and a
3099
    boolean for aligning the value on the right-hand side
3100

3101
  """
3102
  fmt = override.get(fdef.name, None)
3103
  if fmt is not None:
3104
    return fmt
3105

    
3106
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3107

    
3108
  if fdef.kind == constants.QFT_UNIT:
3109
    # Can't keep this information in the static dictionary
3110
    return (lambda value: utils.FormatUnit(value, unit), True)
3111

    
3112
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3113
  if fmt is not None:
3114
    return fmt
3115

    
3116
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3117

    
3118

    
3119
class _QueryColumnFormatter:
3120
  """Callable class for formatting fields of a query.
3121

3122
  """
3123
  def __init__(self, fn, status_fn, verbose):
3124
    """Initializes this class.
3125

3126
    @type fn: callable
3127
    @param fn: Formatting function
3128
    @type status_fn: callable
3129
    @param status_fn: Function to report fields' status
3130
    @type verbose: boolean
3131
    @param verbose: whether to use verbose field descriptions or not
3132

3133
    """
3134
    self._fn = fn
3135
    self._status_fn = status_fn
3136
    self._verbose = verbose
3137

    
3138
  def __call__(self, data):
3139
    """Returns a field's string representation.
3140

3141
    """
3142
    (status, value) = data
3143

    
3144
    # Report status
3145
    self._status_fn(status)
3146

    
3147
    if status == constants.RS_NORMAL:
3148
      return self._fn(value)
3149

    
3150
    assert value is None, \
3151
           "Found value %r for abnormal status %s" % (value, status)
3152

    
3153
    return FormatResultError(status, self._verbose)
3154

    
3155

    
3156
def FormatResultError(status, verbose):
3157
  """Formats result status other than L{constants.RS_NORMAL}.
3158

3159
  @param status: The result status
3160
  @type verbose: boolean
3161
  @param verbose: Whether to return the verbose text
3162
  @return: Text of result status
3163

3164
  """
3165
  assert status != constants.RS_NORMAL, \
3166
         "FormatResultError called with status equal to constants.RS_NORMAL"
3167
  try:
3168
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3169
  except KeyError:
3170
    raise NotImplementedError("Unknown status %s" % status)
3171
  else:
3172
    if verbose:
3173
      return verbose_text
3174
    return normal_text
3175

    
3176

    
3177
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3178
                      header=False, verbose=False):
3179
  """Formats data in L{objects.QueryResponse}.
3180

3181
  @type result: L{objects.QueryResponse}
3182
  @param result: result of query operation
3183
  @type unit: string
3184
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3185
    see L{utils.text.FormatUnit}
3186
  @type format_override: dict
3187
  @param format_override: Dictionary for overriding field formatting functions,
3188
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3189
  @type separator: string or None
3190
  @param separator: String used to separate fields
3191
  @type header: bool
3192
  @param header: Whether to output header row
3193
  @type verbose: boolean
3194
  @param verbose: whether to use verbose field descriptions or not
3195

3196
  """
3197
  if unit is None:
3198
    if separator:
3199
      unit = "m"
3200
    else:
3201
      unit = "h"
3202

    
3203
  if format_override is None:
3204
    format_override = {}
3205

    
3206
  stats = dict.fromkeys(constants.RS_ALL, 0)
3207

    
3208
  def _RecordStatus(status):
3209
    if status in stats:
3210
      stats[status] += 1
3211

    
3212
  columns = []
3213
  for fdef in result.fields:
3214
    assert fdef.title and fdef.name
3215
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3216
    columns.append(TableColumn(fdef.title,
3217
                               _QueryColumnFormatter(fn, _RecordStatus,
3218
                                                     verbose),
3219
                               align_right))
3220

    
3221
  table = FormatTable(result.data, columns, header, separator)
3222

    
3223
  # Collect statistics
3224
  assert len(stats) == len(constants.RS_ALL)
3225
  assert compat.all(count >= 0 for count in stats.values())
3226

    
3227
  # Determine overall status. If there was no data, unknown fields must be
3228
  # detected via the field definitions.
3229
  if (stats[constants.RS_UNKNOWN] or
3230
      (not result.data and _GetUnknownFields(result.fields))):
3231
    status = QR_UNKNOWN
3232
  elif compat.any(count > 0 for key, count in stats.items()
3233
                  if key != constants.RS_NORMAL):
3234
    status = QR_INCOMPLETE
3235
  else:
3236
    status = QR_NORMAL
3237

    
3238
  return (status, table)
3239

    
3240

    
3241
def _GetUnknownFields(fdefs):
3242
  """Returns list of unknown fields included in C{fdefs}.
3243

3244
  @type fdefs: list of L{objects.QueryFieldDefinition}
3245

3246
  """
3247
  return [fdef for fdef in fdefs
3248
          if fdef.kind == constants.QFT_UNKNOWN]
3249

    
3250

    
3251
def _WarnUnknownFields(fdefs):
3252
  """Prints a warning to stderr if a query included unknown fields.
3253

3254
  @type fdefs: list of L{objects.QueryFieldDefinition}
3255

3256
  """
3257
  unknown = _GetUnknownFields(fdefs)
3258
  if unknown:
3259
    ToStderr("Warning: Queried for unknown fields %s",
3260
             utils.CommaJoin(fdef.name for fdef in unknown))
3261
    return True
3262

    
3263
  return False
3264

    
3265

    
3266
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3267
                format_override=None, verbose=False, force_filter=False,
3268
                namefield=None, qfilter=None, isnumeric=False):
3269
  """Generic implementation for listing all items of a resource.
3270

3271
  @param resource: One of L{constants.QR_VIA_LUXI}
3272
  @type fields: list of strings
3273
  @param fields: List of fields to query for
3274
  @type names: list of strings
3275
  @param names: Names of items to query for
3276
  @type unit: string or None
3277
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3278
    None for automatic choice (human-readable for non-separator usage,
3279
    otherwise megabytes); this is a one-letter string
3280
  @type separator: string or None
3281
  @param separator: String used to separate fields
3282
  @type header: bool
3283
  @param header: Whether to show header row
3284
  @type force_filter: bool
3285
  @param force_filter: Whether to always treat names as filter
3286
  @type format_override: dict
3287
  @param format_override: Dictionary for overriding field formatting functions,
3288
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3289
  @type verbose: boolean
3290
  @param verbose: whether to use verbose field descriptions or not
3291
  @type namefield: string
3292
  @param namefield: Name of field to use for simple filters (see
3293
    L{qlang.MakeFilter} for details)
3294
  @type qfilter: list or None
3295
  @param qfilter: Query filter (in addition to names)
3296
  @type isnumeric: bool
3297
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3298
    any simple filters built by namefield should use integer values to
3299
    reflect that
3300

3301
  """
3302
  if not names:
3303
    names = None
3304

    
3305
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3306
                                isnumeric=isnumeric)
3307

    
3308
  if qfilter is None:
3309
    qfilter = namefilter
3310
  elif namefilter is not None:
3311
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3312

    
3313
  if cl is None:
3314
    cl = GetClient()
3315

    
3316
  response = cl.Query(resource, fields, qfilter)
3317

    
3318
  found_unknown = _WarnUnknownFields(response.fields)
3319

    
3320
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3321
                                     header=header,
3322
                                     format_override=format_override,
3323
                                     verbose=verbose)
3324

    
3325
  for line in data:
3326
    ToStdout(line)
3327

    
3328
  assert ((found_unknown and status == QR_UNKNOWN) or
3329
          (not found_unknown and status != QR_UNKNOWN))
3330

    
3331
  if status == QR_UNKNOWN:
3332
    return constants.EXIT_UNKNOWN_FIELD
3333

    
3334
  # TODO: Should the list command fail if not all data could be collected?
3335
  return constants.EXIT_SUCCESS
3336

    
3337

    
3338
def _FieldDescValues(fdef):
3339
  """Helper function for L{GenericListFields} to get query field description.
3340

3341
  @type fdef: L{objects.QueryFieldDefinition}
3342
  @rtype: list
3343

3344
  """
3345
  return [
3346
    fdef.name,
3347
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3348
    fdef.title,
3349
    fdef.doc,
3350
    ]
3351

    
3352

    
3353
def GenericListFields(resource, fields, separator, header, cl=None):
3354
  """Generic implementation for listing fields for a resource.
3355

3356
  @param resource: One of L{constants.QR_VIA_LUXI}
3357
  @type fields: list of strings
3358
  @param fields: List of fields to query for
3359
  @type separator: string or None
3360
  @param separator: String used to separate fields
3361
  @type header: bool
3362
  @param header: Whether to show header row
3363

3364
  """
3365
  if cl is None:
3366
    cl = GetClient()
3367

    
3368
  if not fields:
3369
    fields = None
3370

    
3371
  response = cl.QueryFields(resource, fields)
3372

    
3373
  found_unknown = _WarnUnknownFields(response.fields)
3374

    
3375
  columns = [
3376
    TableColumn("Name", str, False),
3377
    TableColumn("Type", str, False),
3378
    TableColumn("Title", str, False),
3379
    TableColumn("Description", str, False),
3380
    ]
3381

    
3382
  rows = map(_FieldDescValues, response.fields)
3383

    
3384
  for line in FormatTable(rows, columns, header, separator):
3385
    ToStdout(line)
3386

    
3387
  if found_unknown:
3388
    return constants.EXIT_UNKNOWN_FIELD
3389

    
3390
  return constants.EXIT_SUCCESS
3391

    
3392

    
3393
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
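

# Illustrative sketch (made-up data): TableColumn and FormatTable are the
# lower-level helpers behind FormatQueryResult, e.g.
#
#   cols = [TableColumn("Name", str, False),
#           TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True)]
#   FormatTable([["disk0", 1024]], cols, True, None)
#
# returns roughly:
#
#   ["Name  Size",
#    "disk0 1.0G"]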
3472

    
3473

    
3474
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
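

# Illustrative examples (not part of the original code):
#
#   ParseTimespec("30")   => 30      (no suffix, plain seconds)
#   ParseTimespec("2h")   => 7200
#   ParseTimespec("1w")   => 604800
#
# FormatTimestamp() above goes the other way for (seconds, microseconds)
# pairs, e.g. FormatTimestamp((1234567890, 0)) yields a human-readable date.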
3534

    
3535

    
3536
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3537
                   filter_master=False, nodegroup=None):
3538
  """Returns the names of online nodes.
3539

3540
  This function will also log a warning on stderr with the names of
3541
  the offline nodes that are skipped.
3542

3543
  @param nodes: if not empty, use only this subset of nodes (minus the
3544
      offline ones)
3545
  @param cl: if not None, luxi client to use
3546
  @type nowarn: boolean
3547
  @param nowarn: by default, this function will output a note with the
3548
      offline nodes that are skipped; if this parameter is True the
3549
      note is not displayed
3550
  @type secondary_ips: boolean
3551
  @param secondary_ips: if True, return the secondary IPs instead of the
3552
      names, useful for doing network traffic over the replication interface
3553
      (if any)
3554
  @type filter_master: boolean
3555
  @param filter_master: if True, do not return the master node in the list
3556
      (useful in coordination with secondary_ips where we cannot check our
3557
      node name against the list)
3558
  @type nodegroup: string
3559
  @param nodegroup: If set, only return nodes in this node group
3560

3561
  """
3562
  if cl is None:
3563
    cl = GetClient()
3564

    
3565
  qfilter = []
3566

    
3567
  if nodes:
3568
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3569

    
3570
  if nodegroup is not None:
3571
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3572
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3573

    
3574
  if filter_master:
3575
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3576

    
3577
  if qfilter:
3578
    if len(qfilter) > 1:
3579
      final_filter = [qlang.OP_AND] + qfilter
3580
    else:
3581
      assert len(qfilter) == 1
3582
      final_filter = qfilter[0]
3583
  else:
3584
    final_filter = None
3585

    
3586
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3587

    
3588
  def _IsOffline(row):
3589
    (_, (_, offline), _) = row
3590
    return offline
3591

    
3592
  def _GetName(row):
3593
    ((_, name), _, _) = row
3594
    return name
3595

    
3596
  def _GetSip(row):
3597
    (_, _, (_, sip)) = row
3598
    return sip
3599

    
3600
  (offline, online) = compat.partition(result.data, _IsOffline)
3601

    
3602
  if offline and not nowarn:
3603
    ToStderr("Note: skipping offline node(s): %s" %
3604
             utils.CommaJoin(map(_GetName, offline)))
3605

    
3606
  if secondary_ips:
3607
    fn = _GetSip
3608
  else:
3609
    fn = _GetName
3610

    
3611
  return map(fn, online)
3612

    
3613

    
3614
def GetNodesSshPorts(nodes, cl):
3615
  """Retrieves SSH ports of given nodes.
3616

3617
  @param nodes: the names of nodes
3618
  @type nodes: a list of strings
3619
  @param cl: a client to use for the query
3620
  @type cl: L{ganeti.luxi.Client}
3621
  @return: the list of SSH ports corresponding to the nodes
3622
  @rtype: a list of integers
3623
  """
3624
  return map(lambda t: t[0],
3625
             cl.QueryNodes(names=nodes,
3626
                           fields=["ndp/ssh_port"],
3627
                           use_locking=False))
3628

    
3629

    
3630
def _ToStream(stream, txt, *args):
3631
  """Write a message to a stream, bypassing the logging system
3632

3633
  @type stream: file object
3634
  @param stream: the file to which we should write
3635
  @type txt: str
3636
  @param txt: the message
3637

3638
  """
3639
  try:
3640
    if args:
3641
      args = tuple(args)
3642
      stream.write(txt % args)
3643
    else:
3644
      stream.write(txt)
3645
    stream.write("\n")
3646
    stream.flush()
3647
  except IOError, err:
3648
    if err.errno == errno.EPIPE:
3649
      # our terminal went away, we'll exit
3650
      sys.exit(constants.EXIT_FAILURE)
3651
    else:
3652
      raise
3653

    
3654

    
3655
def ToStdout(txt, *args):
3656
  """Write a message to stdout only, bypassing the logging system
3657

3658
  This is just a wrapper over _ToStream.
3659

3660
  @type txt: str
3661
  @param txt: the message
3662

3663
  """
3664
  _ToStream(sys.stdout, txt, *args)
3665

    
3666

    
3667
def ToStderr(txt, *args):
3668
  """Write a message to stderr only, bypassing the logging system
3669

3670
  This is just a wrapper over _ToStream.
3671

3672
  @type txt: str
3673
  @param txt: the message
3674

3675
  """
3676
  _ToStream(sys.stderr, txt, *args)
3677

    
3678

    
3679
class JobExecutor(object):
3680
  """Class which manages the submission and execution of multiple jobs.
3681

3682
  Note that instances of this class should not be reused between
3683
  GetResults() calls.
3684

3685
  """
3686
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3687
    self.queue = []
3688
    if cl is None:
3689
      cl = GetClient()
3690
    self.cl = cl
3691
    self.verbose = verbose
3692
    self.jobs = []
3693
    self.opts = opts
3694
    self.feedback_fn = feedback_fn
3695
    self._counter = itertools.count()
3696

    
3697
  @staticmethod
3698
  def _IfName(name, fmt):
3699
    """Helper function for formatting name.
3700

3701
    """
3702
    if name:
3703
      return fmt % name
3704

    
3705
    return ""
3706

    
3707
  def QueueJob(self, name, *ops):
3708
    """Record a job for later submit.
3709

3710
    @type name: string
3711
    @param name: a description of the job, will be used in WaitJobSet
3712

3713
    """
3714
    SetGenericOpcodeOpts(ops, self.opts)
3715
    self.queue.append((self._counter.next(), name, ops))
3716

    
3717
  def AddJobId(self, name, status, job_id):
3718
    """Adds a job ID to the internal queue.
3719

3720
    """
3721
    self.jobs.append((self._counter.next(), status, job_id, name))
3722

    
3723
  def SubmitPending(self, each=False):
3724
    """Submit all pending jobs.
3725

3726
    """
3727
    if each:
3728
      results = []
3729
      for (_, _, ops) in self.queue:
3730
        # SubmitJob will remove the success status, but raise an exception if
3731
        # the submission fails, so we'll notice that anyway.
3732
        results.append([True, self.cl.SubmitJob(ops)[0]])
3733
    else:
3734
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3735
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3736
      self.jobs.append((idx, status, data, name))
3737

    
3738
  def _ChooseJob(self):
3739
    """Choose a non-waiting/queued job to poll next.
3740

3741
    """
3742
    assert self.jobs, "_ChooseJob called with empty job list"
3743

    
3744
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3745
                               ["status"])
3746
    assert result
3747

    
3748
    for job_data, status in zip(self.jobs, result):
3749
      if (isinstance(status, list) and status and
3750
          status[0] in (constants.JOB_STATUS_QUEUED,
3751
                        constants.JOB_STATUS_WAITING,
3752
                        constants.JOB_STATUS_CANCELING)):
3753
        # job is still present and waiting
3754
        continue
3755
      # good candidate found (either running job or lost job)
3756
      self.jobs.remove(job_data)
3757
      return job_data
3758

    
3759
    # no job found
3760
    return self.jobs.pop(0)
3761

    
3762
  def GetResults(self):
3763
    """Wait for and return the results of all jobs.
3764

3765
    @rtype: list
3766
    @return: list of tuples (success, job results), in the same order
3767
        as the submitted jobs; if a job has failed, instead of the result
3768
        there will be the error message
3769

3770
    """
3771
    if not self.jobs:
3772
      self.SubmitPending()
3773
    results = []
3774
    if self.verbose:
3775
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3776
      if ok_jobs:
3777
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3778

    
3779
    # first, remove any non-submitted jobs
3780
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3781
    for idx, _, jid, name in failures:
3782
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3783
      results.append((idx, False, jid))
3784

    
3785
    while self.jobs:
3786
      (idx, _, jid, name) = self._ChooseJob()
3787
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3788
      try:
3789
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3790
        success = True
3791
      except errors.JobLost, err:
3792
        _, job_result = FormatError(err)
3793
        ToStderr("Job %s%s has been archived, cannot check its result",
3794
                 jid, self._IfName(name, " for %s"))
3795
        success = False
3796
      except (errors.GenericError, rpcerr.ProtocolError), err:
3797
        _, job_result = FormatError(err)
3798
        success = False
3799
        # the error message will always be shown, verbose or not
3800
        ToStderr("Job %s%s has failed: %s",
3801
                 jid, self._IfName(name, " for %s"), job_result)
3802

    
3803
      results.append((idx, success, job_result))
3804

    
3805
    # sort based on the index, then drop it
3806
    results.sort()
3807
    results = [i[1:] for i in results]
3808

    
3809
    return results
3810

    
3811
  def WaitOrShow(self, wait):
3812
    """Wait for job results or only print the job IDs.
3813

3814
    @type wait: boolean
3815
    @param wait: whether to wait or not
3816

3817
    """
3818
    if wait:
3819
      return self.GetResults()
3820
    else:
3821
      if not self.jobs:
3822
        self.SubmitPending()
3823
      for _, status, result, name in self.jobs:
3824
        if status:
3825
          ToStdout("%s: %s", result, name)
3826
        else:
3827
          ToStderr("Failure for %s: %s", name, result)
3828
      return [row[1:3] for row in self.jobs]
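

# Illustrative usage sketch (not part of the original module; "cl", "opts" and
# "instances" below are placeholders): queue one opcode per job, submit them
# together and wait for all results, e.g.
#
#   jex = JobExecutor(cl=cl, opts=opts)
#   for name in instances:
#     jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   results = jex.GetResults()
#
# where "results" is a list of (success, result-or-error-message) pairs in
# submission order.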
3829

    
3830

    
3831
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the explicitly set ("own") parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
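

# Illustrative sketch (made-up values): parameters that fall back to their
# defaults are annotated accordingly, e.g.
#
#   FormatParamsDictInfo({"kernel_path": "/vmlinuz"},
#                        {"kernel_path": "/vmlinuz",
#                         "root_path": "/dev/vda1"})
#
# returns {"kernel_path": "/vmlinuz", "root_path": "default (/dev/vda1)"}.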
3858

    
3859

    
3860
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3861
  """Formats an instance policy.
3862

3863
  @type custom_ipolicy: dict
3864
  @param custom_ipolicy: own policy
3865
  @type eff_ipolicy: dict
3866
  @param eff_ipolicy: effective policy (including defaults); ignored for
3867
      cluster
3868
  @type iscluster: bool
3869
  @param iscluster: the policy is at cluster level
3870
  @rtype: list of pairs
3871
  @return: formatted data, suitable for L{PrintGenericInfo}
3872

3873
  """
3874
  if iscluster:
3875
    eff_ipolicy = custom_ipolicy
3876

    
3877
  minmax_out = []
3878
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3879
  if custom_minmax:
3880
    for (k, minmax) in enumerate(custom_minmax):
3881
      minmax_out.append([
3882
        ("%s/%s" % (key, k),
3883
         FormatParamsDictInfo(minmax[key], minmax[key]))
3884
        for key in constants.ISPECS_MINMAX_KEYS
3885
        ])
3886
  else:
3887
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3888
      minmax_out.append([
3889
        ("%s/%s" % (key, k),
3890
         FormatParamsDictInfo({}, minmax[key]))
3891
        for key in constants.ISPECS_MINMAX_KEYS
3892
        ])
3893
  ret = [("bounds specs", minmax_out)]
3894

    
3895
  if iscluster:
3896
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3897
    ret.append(
3898
      (constants.ISPECS_STD,
3899
       FormatParamsDictInfo(stdspecs, stdspecs))
3900
      )
3901

    
3902
  ret.append(
3903
    ("allowed disk templates",
3904
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3905
                            eff_ipolicy[constants.IPOLICY_DTS]))
3906
    )
3907
  ret.extend([
3908
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3909
    for key in constants.IPOLICY_PARAMETERS
3910
    ])
3911
  return ret
3912

    
3913

    
3914
def _PrintSpecsParameters(buf, specs):
3915
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3916
  buf.write(",".join(values))
3917

    
3918

    
3919
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3920
  """Print the command option used to generate the given instance policy.
3921

3922
  Currently only the parts dealing with specs are supported.
3923

3924
  @type buf: StringIO
3925
  @param buf: stream to write into
3926
  @type ipolicy: dict
3927
  @param ipolicy: instance policy
3928
  @type isgroup: bool
3929
  @param isgroup: whether the policy is at group level
3930

3931
  """
3932
  if not isgroup:
3933
    stdspecs = ipolicy.get("std")
3934
    if stdspecs:
3935
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3936
      _PrintSpecsParameters(buf, stdspecs)
3937
  minmaxes = ipolicy.get("minmax", [])
3938
  first = True
3939
  for minmax in minmaxes:
3940
    minspecs = minmax.get("min")
3941
    maxspecs = minmax.get("max")
3942
    if minspecs and maxspecs:
3943
      if first:
3944
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3945
        first = False
3946
      else:
3947
        buf.write("//")
3948
      buf.write("min:")
3949
      _PrintSpecsParameters(buf, minspecs)
3950
      buf.write("/max:")
3951
      _PrintSpecsParameters(buf, maxspecs)
3952

    
3953

    
3954
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns a dictionary of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
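

# Illustrative sketch (made-up values): "default" entries are passed through
# unchanged while everything else goes through utils.ParseUnit, e.g.
#
#   _MaybeParseUnit({"min": "1G", "max": constants.VALUE_DEFAULT})
#
# returns {"min": 1024, "max": "default"}.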
4004

    
4005

    
4006
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
4007
                             ispecs_disk_count, ispecs_disk_size,
4008
                             ispecs_nic_count, group_ipolicy, fill_all):
4009
  try:
4010
    if ispecs_mem_size:
4011
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
4012
    if ispecs_disk_size:
4013
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
4014
  except (TypeError, ValueError, errors.UnitParseError), err:
4015
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
4016
                               " in policy: %s" %
4017
                               (ispecs_disk_size, ispecs_mem_size, err),
4018
                               errors.ECODE_INVAL)
4019

    
4020
  # prepare ipolicy dict
4021
  ispecs_transposed = {
4022
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
4023
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
4024
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
4025
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
4026
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
4027
    }
4028

    
4029
  # first, check that the values given are correct
4030
  if group_ipolicy:
4031
    forced_type = TISPECS_GROUP_TYPES
4032
  else:
4033
    forced_type = TISPECS_CLUSTER_TYPES
4034
  for specs in ispecs_transposed.values():
4035
    assert type(specs) is dict
4036
    utils.ForceDictType(specs, forced_type)
4037

    
4038
  # then transpose
4039
  ispecs = {
4040
    constants.ISPECS_MIN: {},
4041
    constants.ISPECS_MAX: {},
4042
    constants.ISPECS_STD: {},
4043
    }
4044
  for (name, specs) in ispecs_transposed.iteritems():
4045
    assert name in constants.ISPECS_PARAMETERS
4046
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
4047
      assert key in ispecs
4048
      ispecs[key][name] = val
4049
  minmax_out = {}
4050
  for key in constants.ISPECS_MINMAX_KEYS:
4051
    if fill_all:
4052
      minmax_out[key] = \
4053
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
4054
    else:
4055
      minmax_out[key] = ispecs[key]
4056
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
4057
  if fill_all:
4058
    ipolicy[constants.ISPECS_STD] = \
4059
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
4060
                         ispecs[constants.ISPECS_STD])
4061
  else:
4062
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
4063

    
4064

    
4065
def _ParseSpecUnit(spec, keyname):
4066
  ret = spec.copy()
4067
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
4068
    if k in ret:
4069
      try:
4070
        ret[k] = utils.ParseUnit(ret[k])
4071
      except (TypeError, ValueError, errors.UnitParseError), err:
4072
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
4073
                                    " specs: %s" % (k, ret[k], keyname, err)),
4074
                                   errors.ECODE_INVAL)
4075
  return ret
4076

    
4077

    
4078
def _ParseISpec(spec, keyname, required):
4079
  ret = _ParseSpecUnit(spec, keyname)
4080
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
4081
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
4082
  if required and missing:
4083
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
4084
                               (keyname, utils.CommaJoin(missing)),
4085
                               errors.ECODE_INVAL)
4086
  return ret
4087

    
4088

    
4089
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4090
  ret = None
4091
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
4092
      len(minmax_ispecs[0]) == 1):
4093
    for (key, spec) in minmax_ispecs[0].items():
4094
      # This loop is executed exactly once
4095
      if key in allowed_values and not spec:
4096
        ret = key
4097
  return ret
4098

    
4099

    
4100
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4101
                            group_ipolicy, allowed_values):
4102
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4103
  if found_allowed is not None:
4104
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4105
  elif minmax_ispecs is not None:
4106
    minmax_out = []
4107
    for mmpair in minmax_ispecs:
4108
      mmpair_out = {}
4109
      for (key, spec) in mmpair.items():
4110
        if key not in constants.ISPECS_MINMAX_KEYS:
4111
          msg = "Invalid key in bounds instance specifications: %s" % key
4112
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4113
        mmpair_out[key] = _ParseISpec(spec, key, True)
4114
      minmax_out.append(mmpair_out)
4115
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4116
  if std_ispecs is not None:
4117
    assert not group_ipolicy # This is not an option for gnt-group
4118
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4119

    
4120

    
4121
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4122
                          ispecs_cpu_count=None,
4123
                          ispecs_disk_count=None,
4124
                          ispecs_disk_size=None,
4125
                          ispecs_nic_count=None,
4126
                          minmax_ispecs=None,
4127
                          std_ispecs=None,
4128
                          ipolicy_disk_templates=None,
4129
                          ipolicy_vcpu_ratio=None,
4130
                          ipolicy_spindle_ratio=None,
4131
                          group_ipolicy=False,
4132
                          allowed_values=None,
4133
                          fill_all=False):
4134
  """Creation of instance policy based on command line options.
4135

4136
  @param fill_all: whether for cluster policies we should ensure that
4137
    all values are filled
4138

4139
  """
4140
  assert not (fill_all and allowed_values)
4141

    
4142
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4143
                 ispecs_disk_size or ispecs_nic_count)
4144
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4145
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4146
                               " together with any --ipolicy-xxx-specs option",
4147
                               errors.ECODE_INVAL)
4148

    
4149
  ipolicy_out = objects.MakeEmptyIPolicy()
4150
  if split_specs:
4151
    assert fill_all
4152
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4153
                             ispecs_disk_count, ispecs_disk_size,
4154
                             ispecs_nic_count, group_ipolicy, fill_all)
4155
  elif (minmax_ispecs is not None or std_ispecs is not None):
4156
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4157
                            group_ipolicy, allowed_values)
4158

    
4159
  if ipolicy_disk_templates is not None:
4160
    if allowed_values and ipolicy_disk_templates in allowed_values:
4161
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4162
    else:
4163
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4164
  if ipolicy_vcpu_ratio is not None:
4165
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4166
  if ipolicy_spindle_ratio is not None:
4167
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4168

    
4169
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4170

    
4171
  if not group_ipolicy and fill_all:
4172
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4173

    
4174
  return ipolicy_out
4175

    
4176

    
4177
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
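

# Illustrative sketch (made-up data): nested structures are printed as
# YAML-like indented text, e.g.
#
#   PrintGenericInfo([
#     ("cluster", "example-cluster"),
#     ("nodes", ["node1.example.com", "node2.example.com"]),
#     ])
#
# prints something like:
#
#   cluster: example-cluster
#   nodes:
#     - node1.example.com
#     - node2.example.com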