
root / lib / cli.py @ f58ecd88


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NETWORK_TYPE_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """

class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
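
# Illustrative note (editor's addition, not part of the original module): a
# command that takes exactly one instance name followed by any number of node
# names could describe its positional arguments as
#   [ArgInstance(min=1, max=1), ArgNode()]
# mirroring the ARGS_* shortcuts defined above.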


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
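
# Illustrative example (editor's addition, not part of the original module):
# with opts.tag_type == constants.TAG_NODE and args == ["node1.example.com",
# "web"], _ExtractTagsObject returns (constants.TAG_NODE, "node1.example.com")
# and leaves args == ["web"] for the caller to use as the tag list.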


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have
  the prefix '-' will have the value=None and the prefix stripped, and
  the others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
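
# Illustrative examples (editor's addition, not part of the original module):
#   check_ident_key_val(None, "--net", "0:ip=192.168.1.10,mode=bridged")
#     -> ("0", {"ip": "192.168.1.10", "mode": "bridged"})
#   check_ident_key_val(None, "--disk", "no_remote")
#     -> ("remote", False), i.e. the "remove this parameter group" form.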


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
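
# Illustrative examples (editor's addition, not part of the original module):
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", "2.5") -> 2.5
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", constants.VALUE_DEFAULT)
#     returns the string constants.VALUE_DEFAULT unchanged.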


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 108)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption

    
704

    
705
_YORNO = "yes|no"
706

    
707
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
708
                       help="Increase debugging level")
709

    
710
NOHDR_OPT = cli_option("--no-headers", default=False,
711
                       action="store_true", dest="no_headers",
712
                       help="Don't display column headers")
713

    
714
SEP_OPT = cli_option("--separator", default=None,
715
                     action="store", dest="separator",
716
                     help=("Separator between output fields"
717
                           " (defaults to one space)"))
718

    
719
USEUNITS_OPT = cli_option("--units", default=None,
720
                          dest="units", choices=("h", "m", "g", "t"),
721
                          help="Specify units for output (one of h/m/g/t)")
722

    
723
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
724
                        type="string", metavar="FIELDS",
725
                        help="Comma separated list of output fields")
726

    
727
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
728
                       default=False, help="Force the operation")
729

    
730
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
731
                         default=False, help="Do not require confirmation")
732

    
733
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
734
                                  action="store_true", default=False,
735
                                  help=("Ignore offline nodes and do as much"
736
                                        " as possible"))
737

    
738
TAG_ADD_OPT = cli_option("--tags", dest="tags",
739
                         default=None, help="Comma-separated list of instance"
740
                                            " tags")
741

    
742
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
743
                         default=None, help="File with tag names")
744

    
745
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
746
                        default=False, action="store_true",
747
                        help=("Submit the job and return the job ID, but"
748
                              " don't wait for the job to finish"))
749

    
750
SYNC_OPT = cli_option("--sync", dest="do_locking",
751
                      default=False, action="store_true",
752
                      help=("Grab locks while doing the queries"
753
                            " in order to ensure more consistent results"))
754

    
755
DRY_RUN_OPT = cli_option("--dry-run", default=False,
756
                         action="store_true",
757
                         help=("Do not execute the operation, just run the"
758
                               " check steps and verify it it could be"
759
                               " executed"))
760

    
761
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
762
                         action="store_true",
763
                         help="Increase the verbosity of the operation")
764

    
765
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
766
                              action="store_true", dest="simulate_errors",
767
                              help="Debugging option that makes the operation"
768
                              " treat most runtime checks as failed")
769

    
770
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
771
                        default=True, action="store_false",
772
                        help="Don't wait for sync (DANGEROUS!)")
773

    
774
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
775
                        default=False, action="store_true",
776
                        help="Wait for disks to sync")
777

    
778
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
779
                             action="store_true", default=False,
780
                             help="Enable offline instance")
781

    
782
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
783
                              action="store_true", default=False,
784
                              help="Disable down instance")
785

    
786
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
787
                               help=("Custom disk setup (%s)" %
788
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
789
                               default=None, metavar="TEMPL",
790
                               choices=list(constants.DISK_TEMPLATES))
791

    
792
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
793
                        help="Do not create any network cards for"
794
                        " the instance")
795

    
796
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
797
                               help="Relative path under default cluster-wide"
798
                               " file storage dir to store file-based disks",
799
                               default=None, metavar="<DIR>")
800

    
801
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
802
                                  help="Driver to use for image files",
803
                                  default="loop", metavar="<DRIVER>",
804
                                  choices=list(constants.FILE_DRIVER))
805

    
806
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
807
                            help="Select nodes for the instance automatically"
808
                            " using the <NAME> iallocator plugin",
809
                            default=None, type="string",
810
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
811

    
812
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
813
                                    metavar="<NAME>",
814
                                    help="Set the default instance"
815
                                    " allocator plugin",
816
                                    default=None, type="string",
817
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
818

    
819
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
820
                    metavar="<os>",
821
                    completion_suggest=OPT_COMPL_ONE_OS)
822

    
823
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
824
                          type="keyval", default={},
825
                          help="OS parameters")
826

    
827
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
828
                               action="store_true", default=False,
829
                               help="Force an unknown variant")
830

    
831
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
832
                            action="store_true", default=False,
833
                            help="Do not install the OS (will"
834
                            " enable no-start)")
835

    
836
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
837
                                dest="allow_runtime_chgs",
838
                                default=True, action="store_false",
839
                                help="Don't allow runtime changes")
840

    
841
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
842
                         type="keyval", default={},
843
                         help="Backend parameters")
844

    
845
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
846
                        default={}, dest="hvparams",
847
                        help="Hypervisor parameters")
848

    
849
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
850
                             help="Disk template parameters, in the format"
851
                             " template:option=value,option=value,...",
852
                             type="identkeyval", action="append", default=[])
853

    
854
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
855
                                 type="keyval", default={},
856
                                 help="Memory size specs: list of key=value,"
857
                                " where key is one of min, max, std"
858
                                 " (in MB or using a unit)")
859

    
860
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
861
                                 type="keyval", default={},
862
                                 help="CPU count specs: list of key=value,"
863
                                 " where key is one of min, max, std")
864

    
865
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
866
                                  dest="ispecs_disk_count",
867
                                  type="keyval", default={},
868
                                  help="Disk count specs: list of key=value,"
869
                                  " where key is one of min, max, std")
870

    
871
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
872
                                 type="keyval", default={},
873
                                 help="Disk size specs: list of key=value,"
874
                                 " where key is one of min, max, std"
875
                                 " (in MB or using a unit)")
876

    
877
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
878
                                 type="keyval", default={},
879
                                 help="NIC count specs: list of key=value,"
880
                                 " where key is one of min, max, std")
881

    
882
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
883
                                    dest="ipolicy_disk_templates",
884
                                    type="list", default=None,
885
                                    help="Comma-separated list of"
886
                                    " enabled disk templates")
887

    
888
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
889
                                 dest="ipolicy_vcpu_ratio",
890
                                 type="maybefloat", default=None,
891
                                 help="The maximum allowed vcpu-to-cpu ratio")
892

    
893
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
894
                                   dest="ipolicy_spindle_ratio",
895
                                   type="maybefloat", default=None,
896
                                   help=("The maximum allowed instances to"
897
                                         " spindle ratio"))
898

    
899
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
900
                            help="Hypervisor and hypervisor options, in the"
901
                            " format hypervisor:option=value,option=value,...",
902
                            default=None, type="identkeyval")
903

    
904
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
905
                        help="Hypervisor and hypervisor options, in the"
906
                        " format hypervisor:option=value,option=value,...",
907
                        default=[], action="append", type="identkeyval")
908

    
909
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
910
                           action="store_false",
911
                           help="Don't check that the instance's IP"
912
                           " is alive")
913

    
914
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
915
                             default=True, action="store_false",
916
                             help="Don't check that the instance's name"
917
                             " is resolvable")
918

    
919
NET_OPT = cli_option("--net",
920
                     help="NIC parameters", default=[],
921
                     dest="nics", action="append", type="identkeyval")
922

    
923
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
924
                      dest="disks", action="append", type="identkeyval")
925

    
926
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
927
                         help="Comma-separated list of disks"
928
                         " indices to act on (e.g. 0,2) (optional,"
929
                         " defaults to all disks)")
930

    
931
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
932
                         help="Enforces a single-disk configuration using the"
933
                         " given disk size, in MiB unless a suffix is used",
934
                         default=None, type="unit", metavar="<size>")
935

    
936
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
937
                                dest="ignore_consistency",
938
                                action="store_true", default=False,
939
                                help="Ignore the consistency of the disks on"
940
                                " the secondary")
941

    
942
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
943
                                dest="allow_failover",
944
                                action="store_true", default=False,
945
                                help="If migration is not possible fallback to"
946
                                     " failover")
947

    
948
NONLIVE_OPT = cli_option("--non-live", dest="live",
949
                         default=True, action="store_false",
950
                         help="Do a non-live migration (this usually means"
951
                         " freeze the instance, save the state, transfer and"
952
                         " only then resume running on the secondary node)")
953

    
954
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
955
                                default=None,
956
                                choices=list(constants.HT_MIGRATION_MODES),
957
                                help="Override default migration mode (choose"
958
                                " either live or non-live")
959

    
960
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
961
                                help="Target node and optional secondary node",
962
                                metavar="<pnode>[:<snode>]",
963
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
964

    
965
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
966
                           action="append", metavar="<node>",
967
                           help="Use only this node (can be used multiple"
968
                           " times, if not given defaults to all nodes)",
969
                           completion_suggest=OPT_COMPL_ONE_NODE)
970

    
971
NODEGROUP_OPT_NAME = "--node-group"
972
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
973
                           dest="nodegroup",
974
                           help="Node group (name or uuid)",
975
                           metavar="<nodegroup>",
976
                           default=None, type="string",
977
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
978

    
979
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
980
                             metavar="<node>",
981
                             completion_suggest=OPT_COMPL_ONE_NODE)
982

    
983
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
984
                         action="store_false",
985
                         help="Don't start the instance after creation")
986

    
987
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
988
                         action="store_true", default=False,
989
                         help="Show command instead of executing it")
990

    
991
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
992
                         default=False, action="store_true",
993
                         help="Instead of performing the migration, try to"
994
                         " recover from a failed cleanup. This is safe"
995
                         " to run even if the instance is healthy, but it"
996
                         " will create extra replication traffic and "
997
                         " disrupt briefly the replication (like during the"
998
                         " migration")
999

    
1000
STATIC_OPT = cli_option("-s", "--static", dest="static",
1001
                        action="store_true", default=False,
1002
                        help="Only show configuration data, not runtime data")
1003

    
1004
ALL_OPT = cli_option("--all", dest="show_all",
1005
                     default=False, action="store_true",
1006
                     help="Show info on all instances on the cluster."
1007
                     " This can take a long time to run, use wisely")
1008

    
1009
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1010
                           action="store_true", default=False,
1011
                           help="Interactive OS reinstall, lists available"
1012
                           " OS templates for selection")
1013

    
1014
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1015
                                 action="store_true", default=False,
1016
                                 help="Remove the instance from the cluster"
1017
                                 " configuration even if there are failures"
1018
                                 " during the removal process")
1019

    
1020
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1021
                                        dest="ignore_remove_failures",
1022
                                        action="store_true", default=False,
1023
                                        help="Remove the instance from the"
1024
                                        " cluster configuration even if there"
1025
                                        " are failures during the removal"
1026
                                        " process")
1027

    
1028
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1029
                                 action="store_true", default=False,
1030
                                 help="Remove the instance from the cluster")
1031

    
1032
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1033
                               help="Specifies the new node for the instance",
1034
                               metavar="NODE", default=None,
1035
                               completion_suggest=OPT_COMPL_ONE_NODE)
1036

    
1037
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1038
                               help="Specifies the new secondary node",
1039
                               metavar="NODE", default=None,
1040
                               completion_suggest=OPT_COMPL_ONE_NODE)
1041

    
1042
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1043
                            default=False, action="store_true",
1044
                            help="Replace the disk(s) on the primary"
1045
                                 " node (applies only to internally mirrored"
1046
                                 " disk templates, e.g. %s)" %
1047
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1048

    
1049
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1050
                              default=False, action="store_true",
1051
                              help="Replace the disk(s) on the secondary"
1052
                                   " node (applies only to internally mirrored"
1053
                                   " disk templates, e.g. %s)" %
1054
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1055

    
1056
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1057
                              default=False, action="store_true",
1058
                              help="Lock all nodes and auto-promote as needed"
1059
                              " to MC status")
1060

    
1061
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1062
                              default=False, action="store_true",
1063
                              help="Automatically replace faulty disks"
1064
                                   " (applies only to internally mirrored"
1065
                                   " disk templates, e.g. %s)" %
1066
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1067

    
1068
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1069
                             default=False, action="store_true",
1070
                             help="Ignore current recorded size"
1071
                             " (useful for forcing activation when"
1072
                             " the recorded size is wrong)")
1073

    
1074
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1075
                          metavar="<node>",
1076
                          completion_suggest=OPT_COMPL_ONE_NODE)
1077

    
1078
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1079
                         metavar="<dir>")
1080

    
1081
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1082
                              help="Specify the secondary ip for the node",
1083
                              metavar="ADDRESS", default=None)
1084

    
1085
READD_OPT = cli_option("--readd", dest="readd",
1086
                       default=False, action="store_true",
1087
                       help="Readd old node after replacing it")
1088

    
1089
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1090
                                default=True, action="store_false",
1091
                                help="Disable SSH key fingerprint checking")
1092

    
1093
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1094
                                 default=False, action="store_true",
1095
                                 help="Force the joining of a node")
1096

    
1097
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1098
                    type="bool", default=None, metavar=_YORNO,
1099
                    help="Set the master_candidate flag on the node")
1100

    
1101
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1102
                         type="bool", default=None,
1103
                         help=("Set the offline flag on the node"
1104
                               " (cluster does not communicate with offline"
1105
                               " nodes)"))
1106

    
1107
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1108
                         type="bool", default=None,
1109
                         help=("Set the drained flag on the node"
1110
                               " (excluded from allocation operations)"))
1111

    
1112
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1113
                              type="bool", default=None, metavar=_YORNO,
1114
                              help="Set the master_capable flag on the node")
1115

    
1116
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1117
                          type="bool", default=None, metavar=_YORNO,
1118
                          help="Set the vm_capable flag on the node")
1119

    
1120
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1121
                             type="bool", default=None, metavar=_YORNO,
1122
                             help="Set the allocatable flag on a volume")
1123

    
1124
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1125
                               help="Disable support for lvm based instances"
1126
                               " (cluster-wide)",
1127
                               action="store_false", default=True)
1128

    
1129
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1130
                            dest="enabled_hypervisors",
1131
                            help="Comma-separated list of hypervisors",
1132
                            type="string", default=None)
1133

    
1134
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1135
                            type="keyval", default={},
1136
                            help="NIC parameters")
1137

    
1138
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1139
                         dest="candidate_pool_size", type="int",
1140
                         help="Set the candidate pool size")
1141

    
1142
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1143
                         help=("Enables LVM and specifies the volume group"
1144
                               " name (cluster-wide) for disk allocation"
1145
                               " [%s]" % constants.DEFAULT_VG),
1146
                         metavar="VG", default=None)
1147

    
1148
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1149
                          help="Destroy cluster", action="store_true")
1150

    
1151
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1152
                          help="Skip node agreement check (dangerous)",
1153
                          action="store_true", default=False)
1154

    
1155
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1156
                            help="Specify the mac prefix for the instance IP"
1157
                            " addresses, in the format XX:XX:XX",
1158
                            metavar="PREFIX",
1159
                            default=None)
1160

    
1161
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1162
                               help="Specify the node interface (cluster-wide)"
1163
                               " on which the master IP address will be added"
1164
                               " (cluster init default: %s)" %
1165
                               constants.DEFAULT_BRIDGE,
1166
                               metavar="NETDEV",
1167
                               default=None)
1168

    
1169
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1170
                                help="Specify the netmask of the master IP",
1171
                                metavar="NETMASK",
1172
                                default=None)
1173

    
1174
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1175
                                     dest="use_external_mip_script",
1176
                                     help="Specify whether to run a"
1177
                                     " user-provided script for the master"
1178
                                     " IP address turnup and"
1179
                                     " turndown operations",
1180
                                     type="bool", metavar=_YORNO, default=None)
1181

    
1182
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1183
                                help="Specify the default directory (cluster-"
1184
                                "wide) for storing the file-based disks [%s]" %
1185
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1186
                                metavar="DIR",
1187
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1188

    
1189
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1190
  "--shared-file-storage-dir",
1191
  dest="shared_file_storage_dir",
1192
  help="Specify the default directory (cluster-wide) for storing the"
1193
  " shared file-based disks [%s]" %
1194
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1195
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1196

    
1197
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1198
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1199
                                   action="store_false", default=True)
1200

    
1201
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1202
                                    help="Don't initialize SSH keys",
1203
                                    action="store_false", default=True)
1204

    
1205
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1206
                             help="Enable parseable error messages",
1207
                             action="store_true", default=False)
1208

    
1209
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1210
                          help="Skip N+1 memory redundancy tests",
1211
                          action="store_true", default=False)
1212

    
1213
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1214
                             help="Type of reboot: soft/hard/full",
1215
                             default=constants.INSTANCE_REBOOT_HARD,
1216
                             metavar="<REBOOT>",
1217
                             choices=list(constants.REBOOT_TYPES))
1218

    
1219
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1220
                                    dest="ignore_secondaries",
1221
                                    default=False, action="store_true",
1222
                                    help="Ignore errors from secondaries")
1223

    
1224
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1225
                            action="store_false", default=True,
1226
                            help="Don't shutdown the instance (unsafe)")
1227

    
1228
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1229
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1230
                         help="Maximum time to wait")
1231

    
1232
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1233
                                  dest="shutdown_timeout", type="int",
1234
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1235
                                  help="Maximum time to wait for instance"
1236
                                  " shutdown")
1237

    
1238
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1239
                          default=None,
1240
                          help=("Number of seconds between repetions of the"
1241
                                " command"))
1242

    
1243
EARLY_RELEASE_OPT = cli_option("--early-release",
1244
                               dest="early_release", default=False,
1245
                               action="store_true",
1246
                               help="Release the locks on the secondary"
1247
                               " node(s) early")
1248

    
1249
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1250
                                  dest="new_cluster_cert",
1251
                                  default=False, action="store_true",
1252
                                  help="Generate a new cluster certificate")
1253

    
1254
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1255
                           default=None,
1256
                           help="File containing new RAPI certificate")
1257

    
1258
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1259
                               default=None, action="store_true",
1260
                               help=("Generate a new self-signed RAPI"
1261
                                     " certificate"))
1262

    
1263
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1264
                            default=None,
1265
                            help="File containing new SPICE certificate")
1266

    
1267
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1268
                              default=None,
1269
                              help="File containing the certificate of the CA"
1270
                              " which signed the SPICE certificate")
1271

    
1272
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1273
                                dest="new_spice_cert", default=None,
1274
                                action="store_true",
1275
                                help=("Generate a new self-signed SPICE"
1276
                                      " certificate"))
1277

    
1278
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1279
                                    dest="new_confd_hmac_key",
1280
                                    default=False, action="store_true",
1281
                                    help=("Create a new HMAC key for %s" %
1282
                                          constants.CONFD))
1283

    
1284
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1285
                                       dest="cluster_domain_secret",
1286
                                       default=None,
1287
                                       help=("Load new new cluster domain"
1288
                                             " secret from file"))
1289

    
1290
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1291
                                           dest="new_cluster_domain_secret",
1292
                                           default=False, action="store_true",
1293
                                           help=("Create a new cluster domain"
1294
                                                 " secret"))
1295

    
1296
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1297
                              dest="use_replication_network",
1298
                              help="Whether to use the replication network"
1299
                              " for talking to the nodes",
1300
                              action="store_true", default=False)
1301

    
1302
MAINTAIN_NODE_HEALTH_OPT = \
1303
    cli_option("--maintain-node-health", dest="maintain_node_health",
1304
               metavar=_YORNO, default=None, type="bool",
1305
               help="Configure the cluster to automatically maintain node"
1306
               " health, by shutting down unknown instances, shutting down"
1307
               " unknown DRBD devices, etc.")
1308

    
1309
IDENTIFY_DEFAULTS_OPT = \
1310
    cli_option("--identify-defaults", dest="identify_defaults",
1311
               default=False, action="store_true",
1312
               help="Identify which saved instance parameters are equal to"
1313
               " the current cluster defaults and set them as such, instead"
1314
               " of marking them as overridden")
1315

    
1316
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1317
                         action="store", dest="uid_pool",
1318
                         help=("A list of user-ids or user-id"
1319
                               " ranges separated by commas"))
1320

    
1321
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1322
                          action="store", dest="add_uids",
1323
                          help=("A list of user-ids or user-id"
1324
                                " ranges separated by commas, to be"
1325
                                " added to the user-id pool"))
1326

    
1327
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1328
                             action="store", dest="remove_uids",
1329
                             help=("A list of user-ids or user-id"
1330
                                   " ranges separated by commas, to be"
1331
                                   " removed from the user-id pool"))
1332

    
1333
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1334
                              action="store", dest="reserved_lvs",
1335
                              help=("A comma-separated list of reserved"
1336
                                    " logical volumes names, that will be"
1337
                                    " ignored by cluster verify"))
1338

    
1339
ROMAN_OPT = cli_option("--roman",
1340
                       dest="roman_integers", default=False,
1341
                       action="store_true",
1342
                       help="Use roman numbers for positive integers")
1343

    
1344
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1345
                             action="store", default=None,
1346
                             help="Specifies usermode helper for DRBD")
1347

    
1348
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1349
                                action="store_false", default=True,
1350
                                help="Disable support for DRBD")
1351

    
1352
PRIMARY_IP_VERSION_OPT = \
1353
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1354
               action="store", dest="primary_ip_version",
1355
               metavar="%d|%d" % (constants.IP4_VERSION,
1356
                                  constants.IP6_VERSION),
1357
               help="Cluster-wide IP version for primary IP")
1358

    
1359
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1360
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1361
                          choices=_PRIONAME_TO_VALUE.keys(),
1362
                          help="Priority for opcode processing")
1363

    
1364
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1365
                        type="bool", default=None, metavar=_YORNO,
1366
                        help="Sets the hidden flag on the OS")
1367

    
1368
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1369
                        type="bool", default=None, metavar=_YORNO,
1370
                        help="Sets the blacklisted flag on the OS")
1371

    
1372
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1373
                                     type="bool", metavar=_YORNO,
1374
                                     dest="prealloc_wipe_disks",
1375
                                     help=("Wipe disks prior to instance"
1376
                                           " creation"))
1377

    
1378
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1379
                             type="keyval", default=None,
1380
                             help="Node parameters")
1381

    
1382
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1383
                              action="store", metavar="POLICY", default=None,
1384
                              help="Allocation policy for the node group")
1385

    
1386
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1387
                              type="bool", metavar=_YORNO,
1388
                              dest="node_powered",
1389
                              help="Specify if the SoR for node is powered")
1390

    
1391
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1392
                             default=constants.OOB_TIMEOUT,
1393
                             help="Maximum time to wait for out-of-band helper")
1394

    
1395
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1396
                             default=constants.OOB_POWER_DELAY,
1397
                             help="Time in seconds to wait between power-ons")
1398

    
1399
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1400
                              action="store_true", default=False,
1401
                              help=("Whether command argument should be treated"
1402
                                    " as filter"))
1403

    
1404
NO_REMEMBER_OPT = cli_option("--no-remember",
1405
                             dest="no_remember",
1406
                             action="store_true", default=False,
1407
                             help="Perform but do not record the change"
1408
                             " in the configuration")
1409

    
1410
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1411
                              default=False, action="store_true",
1412
                              help="Evacuate primary instances only")
1413

    
1414
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1415
                                default=False, action="store_true",
1416
                                help="Evacuate secondary instances only"
1417
                                     " (applies only to internally mirrored"
1418
                                     " disk templates, e.g. %s)" %
1419
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1420

    
1421
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1422
                                action="store_true", default=False,
1423
                                help="Pause instance at startup")
1424

    
1425
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1426
                          help="Destination node group (name or uuid)",
1427
                          default=None, action="append",
1428
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1429

    
1430
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1431
                               action="append", dest="ignore_errors",
1432
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1433
                               help="Error code to be ignored")
1434

    
1435
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1436
                            action="append",
1437
                            help=("Specify disk state information in the"
1438
                                  " format"
1439
                                  " storage_type/identifier:option=value,...;"
1440
                                  " note this is unused for now"),
1441
                            type="identkeyval")
1442

    
1443
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1444
                          action="append",
1445
                          help=("Specify hypervisor state information in the"
1446
                                " format hypervisor:option=value,...;"
1447
                                " note this is unused for now"),
1448
                          type="identkeyval")
1449

    
1450
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1451
                                action="store_true", default=False,
1452
                                help="Ignore instance policy violations")
1453

    
1454
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1455
                             help="Sets the instance's runtime memory,"
1456
                             " ballooning it up or down to the new value",
1457
                             default=None, type="unit", metavar="<size>")
1458

    
1459
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1460
                          action="store_true", default=False,
1461
                          help="Marks the grow as absolute instead of the"
1462
                          " (default) relative mode")
1463

    
1464
NETWORK_OPT = cli_option("--network",
1465
                         action="store", default=None, dest="network",
1466
                         help="IP network in CIDR notation")
1467

    
1468
GATEWAY_OPT = cli_option("--gateway",
1469
                         action="store", default=None, dest="gateway",
1470
                         help="IP address of the router (gateway)")
1471

    
1472
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1473
                                  action="store", default=None,
1474
                                  dest="add_reserved_ips",
1475
                                  help="Comma-separated list of"
1476
                                  " reserved IPs to add")
1477

    
1478
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1479
                                     action="store", default=None,
1480
                                     dest="remove_reserved_ips",
1481
                                     help="Comma-delimited list of"
1482
                                     " reserved IPs to remove")
1483

    
1484
NETWORK_TYPE_OPT = cli_option("--network-type",
1485
                              action="store", default=None, dest="network_type",
1486
                              help="Network type: private, public, None")
1487

    
1488
NETWORK6_OPT = cli_option("--network6",
1489
                          action="store", default=None, dest="network6",
1490
                          help="IP network in CIDR notation")
1491

    
1492
GATEWAY6_OPT = cli_option("--gateway6",
1493
                          action="store", default=None, dest="gateway6",
1494
                          help="IP6 address of the router (gateway)")
1495

    
1496
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1497
                                  dest="conflicts_check",
1498
                                  default=True,
1499
                                  action="store_false",
1500
                                  help="Don't check for conflicting IPs")
1501

    
1502
#: Options provided by all commands
1503
COMMON_OPTS = [DEBUG_OPT]
1504

    
1505
# common options for creating instances. add and import then add their own
1506
# specific ones.
1507
COMMON_CREATE_OPTS = [
1508
  BACKEND_OPT,
1509
  DISK_OPT,
1510
  DISK_TEMPLATE_OPT,
1511
  FILESTORE_DIR_OPT,
1512
  FILESTORE_DRIVER_OPT,
1513
  HYPERVISOR_OPT,
1514
  IALLOCATOR_OPT,
1515
  NET_OPT,
1516
  NODE_PLACEMENT_OPT,
1517
  NOIPCHECK_OPT,
1518
  NOCONFLICTSCHECK_OPT,
1519
  NONAMECHECK_OPT,
1520
  NONICS_OPT,
1521
  NWSYNC_OPT,
1522
  OSPARAMS_OPT,
1523
  OS_SIZE_OPT,
1524
  SUBMIT_OPT,
1525
  TAG_ADD_OPT,
1526
  DRY_RUN_OPT,
1527
  PRIORITY_OPT,
1528
  ]
1529

    
1530
# common instance policy options
1531
INSTANCE_POLICY_OPTS = [
1532
  SPECS_CPU_COUNT_OPT,
1533
  SPECS_DISK_COUNT_OPT,
1534
  SPECS_DISK_SIZE_OPT,
1535
  SPECS_MEM_SIZE_OPT,
1536
  SPECS_NIC_COUNT_OPT,
1537
  IPOLICY_DISK_TEMPLATES,
1538
  IPOLICY_VCPU_RATIO,
1539
  IPOLICY_SPINDLE_RATIO,
1540
  ]
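
# Example (illustrative only): command-specific option lists are normally
# built by extending the shared lists above, e.g. for an "add"-style command:
#
#   add_opts = COMMON_CREATE_OPTS + [FORCE_VARIANT_OPT, IDENTIFY_DEFAULTS_OPT]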
1541

    
1542

    
1543
class _ShowUsage(Exception):
1544
  """Exception class for L{_ParseArgs}.
1545

1546
  """
1547
  def __init__(self, exit_error):
1548
    """Initializes instances of this class.
1549

1550
    @type exit_error: bool
1551
    @param exit_error: Whether to report failure on exit
1552

1553
    """
1554
    Exception.__init__(self)
1555
    self.exit_error = exit_error
1556

    
1557

    
1558
class _ShowVersion(Exception):
1559
  """Exception class for L{_ParseArgs}.
1560

1561
  """
1562

    
1563

    
1564
def _ParseArgs(binary, argv, commands, aliases, env_override):
1565
  """Parser for the command line arguments.
1566

1567
  This function parses the arguments and returns the function which
1568
  must be executed together with its (modified) arguments.
1569

1570
  @param binary: Script name
1571
  @param argv: Command line arguments
1572
  @param commands: Dictionary containing command definitions
1573
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1574
  @param env_override: list of env variables allowed for default args
1575
  @raise _ShowUsage: If usage description should be shown
1576
  @raise _ShowVersion: If version should be shown
1577

1578
  """
1579
  assert not (env_override - set(commands))
1580
  assert not (set(aliases.keys()) & set(commands.keys()))
1581

    
1582
  if len(argv) > 1:
1583
    cmd = argv[1]
1584
  else:
1585
    # No option or command given
1586
    raise _ShowUsage(exit_error=True)
1587

    
1588
  if cmd == "--version":
1589
    raise _ShowVersion()
1590
  elif cmd == "--help":
1591
    raise _ShowUsage(exit_error=False)
1592
  elif not (cmd in commands or cmd in aliases):
1593
    raise _ShowUsage(exit_error=True)
1594

    
1595
  # get command, unalias it, and look it up in commands
1596
  if cmd in aliases:
1597
    if aliases[cmd] not in commands:
1598
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1599
                                   " command '%s'" % (cmd, aliases[cmd]))
1600

    
1601
    cmd = aliases[cmd]
1602

    
1603
  if cmd in env_override:
1604
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1605
    env_args = os.environ.get(args_env_name)
1606
    if env_args:
1607
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1608

    
1609
  func, args_def, parser_opts, usage, description = commands[cmd]
1610
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1611
                        description=description,
1612
                        formatter=TitledHelpFormatter(),
1613
                        usage="%%prog %s %s" % (cmd, usage))
1614
  parser.disable_interspersed_args()
1615
  options, args = parser.parse_args(args=argv[2:])
1616

    
1617
  if not _CheckArguments(cmd, args_def, args):
1618
    return None, None, None
1619

    
1620
  return func, options, args
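
# Illustrative note on env_override: for a command listed in env_override,
# _ParseArgs reads default arguments from an environment variable named after
# the binary and the command, e.g. for binary "gnt-instance" and command
# "list" it looks at GNT_INSTANCE_LIST, shlex-splits its contents and inserts
# them right after the command name:
#
#   GNT_INSTANCE_LIST="-o name,status" gnt-instance list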
1621

    
1622

    
1623
def _FormatUsage(binary, commands):
1624
  """Generates a nice description of all commands.
1625

1626
  @param binary: Script name
1627
  @param commands: Dictionary containing command definitions
1628

1629
  """
1630
  # compute the max line length for cmd + usage
1631
  mlen = min(60, max(map(len, commands)))
1632

    
1633
  yield "Usage: %s {command} [options...] [argument...]" % binary
1634
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1635
  yield ""
1636
  yield "Commands:"
1637

    
1638
  # and format a nice command list
1639
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1640
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1641
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1642
    for line in help_lines:
1643
      yield " %-*s   %s" % (mlen, "", line)
1644

    
1645
  yield ""
1646

    
1647

    
1648
def _CheckArguments(cmd, args_def, args):
1649
  """Verifies the arguments using the argument definition.
1650

1651
  Algorithm:
1652

1653
    1. Abort with error if values specified by user but none expected.
1654

1655
    1. For each argument in definition
1656

1657
      1. Keep running count of minimum number of values (min_count)
1658
      1. Keep running count of maximum number of values (max_count)
1659
      1. If it has an unlimited number of values
1660

1661
        1. Abort with error if it's not the last argument in the definition
1662

1663
    1. If last argument has limited number of values
1664

1665
      1. Abort with error if number of values doesn't match or is too large
1666

1667
    1. Abort with error if user didn't pass enough values (min_count)
1668

1669
  """
1670
  if args and not args_def:
1671
    ToStderr("Error: Command %s expects no arguments", cmd)
1672
    return False
1673

    
1674
  min_count = None
1675
  max_count = None
1676
  check_max = None
1677

    
1678
  last_idx = len(args_def) - 1
1679

    
1680
  for idx, arg in enumerate(args_def):
1681
    if min_count is None:
1682
      min_count = arg.min
1683
    elif arg.min is not None:
1684
      min_count += arg.min
1685

    
1686
    if max_count is None:
1687
      max_count = arg.max
1688
    elif arg.max is not None:
1689
      max_count += arg.max
1690

    
1691
    if idx == last_idx:
1692
      check_max = (arg.max is not None)
1693

    
1694
    elif arg.max is None:
1695
      raise errors.ProgrammerError("Only the last argument can have max=None")
1696

    
1697
  if check_max:
1698
    # Command with exact number of arguments
1699
    if (min_count is not None and max_count is not None and
1700
        min_count == max_count and len(args) != min_count):
1701
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1702
      return False
1703

    
1704
    # Command with limited number of arguments
1705
    if max_count is not None and len(args) > max_count:
1706
      ToStderr("Error: Command %s expects only %d argument(s)",
1707
               cmd, max_count)
1708
      return False
1709

    
1710
  # Command with some required arguments
1711
  if min_count is not None and len(args) < min_count:
1712
    ToStderr("Error: Command %s expects at least %d argument(s)",
1713
             cmd, min_count)
1714
    return False
1715

    
1716
  return True
1717

    
1718

    
1719
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
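
# Illustrative examples (node names are made up):
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     -> ("node1.example.com", None)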
1727

    
1728

    
1729
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
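
# Illustrative examples (OS and variant names are made up):
#
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#     -> ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None)
#     -> ["debootstrap"]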
1744

    
1745

    
1746
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
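
# Illustrative examples (field names are made up):
#
#   ParseFields(None, ["name", "status"])         -> ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])  -> ["name", "status", "oper_ram"]
#   ParseFields("name,pnode", ["name", "status"]) -> ["name", "pnode"]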
1762

    
1763

    
1764
UsesRPC = rpc.RunWithRPC
1765

    
1766

    
1767
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
1783
    choices = [("y", True, "Perform the operation"),
1784
               ("n", False, "Do not perform the operation")]
1785
  if not choices or not isinstance(choices, list):
1786
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1787
  for entry in choices:
1788
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1789
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1790

    
1791
  answer = choices[-1][1]
1792
  new_text = []
1793
  for line in text.splitlines():
1794
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1795
  text = "\n".join(new_text)
1796
  try:
1797
    f = file("/dev/tty", "a+")
1798
  except IOError:
1799
    return answer
1800
  try:
1801
    chars = [entry[0] for entry in choices]
1802
    chars[-1] = "[%s]" % chars[-1]
1803
    chars.append("?")
1804
    maps = dict([(entry[0], entry[1]) for entry in choices])
1805
    while True:
1806
      f.write(text)
1807
      f.write("\n")
1808
      f.write("/".join(chars))
1809
      f.write(": ")
1810
      line = f.readline(2).strip().lower()
1811
      if line in maps:
1812
        answer = maps[line]
1813
        break
1814
      elif line == "?":
1815
        for entry in choices:
1816
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1817
        f.write("\n")
1818
        continue
1819
  finally:
1820
    f.close()
1821
  return answer
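
# Illustrative use of AskUser with a custom choices list (the question text is
# made up); the last entry also acts as the default answer when no tty is
# available:
#
#   choice = AskUser("Destroy all instances?",
#                    [("y", True, "Yes, destroy them"),
#                     ("n", False, "No, keep them")])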
1822

    
1823

    
1824
class JobSubmittedException(Exception):
1825
  """Job was submitted, client should exit.
1826

1827
  This exception has one argument, the ID of the job that was
1828
  submitted. The handler should print this ID.
1829

1830
  This is not an error, just a structured way to exit from clients.
1831

1832
  """
1833

    
1834

    
1835
def SendJob(ops, cl=None):
1836
  """Function to submit an opcode without waiting for the results.
1837

1838
  @type ops: list
1839
  @param ops: list of opcodes
1840
  @type cl: luxi.Client
1841
  @param cl: the luxi client to use for communicating with the master;
1842
             if None, a new client will be created
1843

1844
  """
1845
  if cl is None:
1846
    cl = GetClient()
1847

    
1848
  job_id = cl.SubmitJob(ops)
1849

    
1850
  return job_id
1851

    
1852

    
1853
def GenericPollJob(job_id, cbs, report_cbs):
1854
  """Generic job-polling function.
1855

1856
  @type job_id: number
1857
  @param job_id: Job ID
1858
  @type cbs: Instance of L{JobPollCbBase}
1859
  @param cbs: Data callbacks
1860
  @type report_cbs: Instance of L{JobPollReportCbBase}
1861
  @param report_cbs: Reporting callbacks
1862

1863
  """
1864
  prev_job_info = None
1865
  prev_logmsg_serial = None
1866

    
1867
  status = None
1868

    
1869
  while True:
1870
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1871
                                      prev_logmsg_serial)
1872
    if not result:
1873
      # job not found, go away!
1874
      raise errors.JobLost("Job with id %s lost" % job_id)
1875

    
1876
    if result == constants.JOB_NOTCHANGED:
1877
      report_cbs.ReportNotChanged(job_id, status)
1878

    
1879
      # Wait again
1880
      continue
1881

    
1882
    # Split result, a tuple of (field values, log entries)
1883
    (job_info, log_entries) = result
1884
    (status, ) = job_info
1885

    
1886
    if log_entries:
1887
      for log_entry in log_entries:
1888
        (serial, timestamp, log_type, message) = log_entry
1889
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1890
                                    log_type, message)
1891
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1892

    
1893
    # TODO: Handle canceled and archived jobs
1894
    elif status in (constants.JOB_STATUS_SUCCESS,
1895
                    constants.JOB_STATUS_ERROR,
1896
                    constants.JOB_STATUS_CANCELING,
1897
                    constants.JOB_STATUS_CANCELED):
1898
      break
1899

    
1900
    prev_job_info = job_info
1901

    
1902
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1903
  if not jobs:
1904
    raise errors.JobLost("Job with id %s lost" % job_id)
1905

    
1906
  status, opstatus, result = jobs[0]
1907

    
1908
  if status == constants.JOB_STATUS_SUCCESS:
1909
    return result
1910

    
1911
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1912
    raise errors.OpExecError("Job was canceled")
1913

    
1914
  has_ok = False
1915
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1916
    if status == constants.OP_STATUS_SUCCESS:
1917
      has_ok = True
1918
    elif status == constants.OP_STATUS_ERROR:
1919
      errors.MaybeRaise(msg)
1920

    
1921
      if has_ok:
1922
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1923
                                 (idx, msg))
1924

    
1925
      raise errors.OpExecError(str(msg))
1926

    
1927
  # default failure mode
1928
  raise errors.OpExecError(result)
1929

    
1930

    
1931
class JobPollCbBase:
1932
  """Base class for L{GenericPollJob} callbacks.
1933

1934
  """
1935
  def __init__(self):
1936
    """Initializes this class.
1937

1938
    """
1939

    
1940
  def WaitForJobChangeOnce(self, job_id, fields,
1941
                           prev_job_info, prev_log_serial):
1942
    """Waits for changes on a job.
1943

1944
    """
1945
    raise NotImplementedError()
1946

    
1947
  def QueryJobs(self, job_ids, fields):
1948
    """Returns the selected fields for the selected job IDs.
1949

1950
    @type job_ids: list of numbers
1951
    @param job_ids: Job IDs
1952
    @type fields: list of strings
1953
    @param fields: Fields
1954

1955
    """
1956
    raise NotImplementedError()
1957

    
1958

    
1959
class JobPollReportCbBase:
1960
  """Base class for L{GenericPollJob} reporting callbacks.
1961

1962
  """
1963
  def __init__(self):
1964
    """Initializes this class.
1965

1966
    """
1967

    
1968
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1969
    """Handles a log message.
1970

1971
    """
1972
    raise NotImplementedError()
1973

    
1974
  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1984

    
1985

    
1986
class _LuxiJobPollCb(JobPollCbBase):
1987
  def __init__(self, cl):
1988
    """Initializes this class.
1989

1990
    """
1991
    JobPollCbBase.__init__(self)
1992
    self.cl = cl
1993

    
1994
  def WaitForJobChangeOnce(self, job_id, fields,
1995
                           prev_job_info, prev_log_serial):
1996
    """Waits for changes on a job.
1997

1998
    """
1999
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2000
                                        prev_job_info, prev_log_serial)
2001

    
2002
  def QueryJobs(self, job_ids, fields):
2003
    """Returns the selected fields for the selected job IDs.
2004

2005
    """
2006
    return self.cl.QueryJobs(job_ids, fields)
2007

    
2008

    
2009
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2010
  def __init__(self, feedback_fn):
2011
    """Initializes this class.
2012

2013
    """
2014
    JobPollReportCbBase.__init__(self)
2015

    
2016
    self.feedback_fn = feedback_fn
2017

    
2018
    assert callable(feedback_fn)
2019

    
2020
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2021
    """Handles a log message.
2022

2023
    """
2024
    self.feedback_fn((timestamp, log_type, log_msg))
2025

    
2026
  def ReportNotChanged(self, job_id, status):
2027
    """Called if a job hasn't changed in a while.
2028

2029
    """
2030
    # Ignore
2031

    
2032

    
2033
class StdioJobPollReportCb(JobPollReportCbBase):
2034
  def __init__(self):
2035
    """Initializes this class.
2036

2037
    """
2038
    JobPollReportCbBase.__init__(self)
2039

    
2040
    self.notified_queued = False
2041
    self.notified_waitlock = False
2042

    
2043
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2044
    """Handles a log message.
2045

2046
    """
2047
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2048
             FormatLogMessage(log_type, log_msg))
2049

    
2050
  def ReportNotChanged(self, job_id, status):
2051
    """Called if a job hasn't changed in a while.
2052

2053
    """
2054
    if status is None:
2055
      return
2056

    
2057
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2058
      ToStderr("Job %s is waiting in queue", job_id)
2059
      self.notified_queued = True
2060

    
2061
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2062
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2063
      self.notified_waitlock = True
2064

    
2065

    
2066
def FormatLogMessage(log_type, log_msg):
2067
  """Formats a job message according to its type.
2068

2069
  """
2070
  if log_type != constants.ELOG_MESSAGE:
2071
    log_msg = str(log_msg)
2072

    
2073
  return utils.SafeEncode(log_msg)
2074

    
2075

    
2076
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
2087
    cl = GetClient()
2088

    
2089
  if reporter is None:
2090
    if feedback_fn:
2091
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2092
    else:
2093
      reporter = StdioJobPollReportCb()
2094
  elif feedback_fn:
2095
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2096

    
2097
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
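
# Illustrative submit-and-poll flow ("op" stands for any opcode instance):
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)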
2098

    
2099

    
2100
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2101
  """Legacy function to submit an opcode.
2102

2103
  This is just a simple wrapper over the construction of the processor
2104
  instance. It should be extended to better handle feedback and
2105
  interaction functions.
2106

2107
  """
2108
  if cl is None:
2109
    cl = GetClient()
2110

    
2111
  SetGenericOpcodeOpts([op], opts)
2112

    
2113
  job_id = SendJob([op], cl=cl)
2114

    
2115
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2116
                       reporter=reporter)
2117

    
2118
  return op_results[0]
2119

    
2120

    
2121
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
2133
  if opts and opts.submit_only:
2134
    job = [op]
2135
    SetGenericOpcodeOpts(job, opts)
2136
    job_id = SendJob(job, cl=cl)
2137
    raise JobSubmittedException(job_id)
2138
  else:
2139
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
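
# Sketch of a typical command implementation built on SubmitOrSend (the
# function name is a placeholder, not part of this module):
#
#   def DoSomething(opts, args):
#     op = ...  # build whichever opcode the command needs
#     SubmitOrSend(op, opts)
#     return 0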
2140

    
2141

    
2142
def SetGenericOpcodeOpts(opcode_list, options):
2143
  """Processor for generic options.
2144

2145
  This function updates the given opcodes based on generic command
2146
  line options (like debug, dry-run, etc.).
2147

2148
  @param opcode_list: list of opcodes
2149
  @param options: command line options or None
2150
  @return: None (in-place modification)
2151

2152
  """
2153
  if not options:
2154
    return
2155
  for op in opcode_list:
2156
    op.debug_level = options.debug
2157
    if hasattr(options, "dry_run"):
2158
      op.dry_run = options.dry_run
2159
    if getattr(options, "priority", None) is not None:
2160
      op.priority = _PRIONAME_TO_VALUE[options.priority]
2161

    
2162

    
2163
def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  if query and constants.ENABLE_SPLIT_QUERY:
2174
    address = pathutils.QUERY_SOCKET
2175
  else:
2176
    address = None
2177
  # TODO: Cache object?
2178
  try:
2179
    client = luxi.Client(address=address)
2180
  except luxi.NoMasterError:
2181
    ss = ssconf.SimpleStore()
2182

    
2183
    # Try to read ssconf file
2184
    try:
2185
      ss.GetMasterNode()
2186
    except errors.ConfigurationError:
2187
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2188
                                 " not part of a cluster",
2189
                                 errors.ECODE_INVAL)
2190

    
2191
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2192
    if master != myself:
2193
      raise errors.OpPrereqError("This is not the master node, please connect"
2194
                                 " to node '%s' and rerun the command" %
2195
                                 master, errors.ECODE_INVAL)
2196
    raise
2197
  return client
2198

    
2199

    
2200
def FormatError(err):
2201
  """Return a formatted error message for a given error.
2202

2203
  This function takes an exception instance and returns a tuple
2204
  consisting of two values: first, the recommended exit code, and
2205
  second, a string describing the error message (not
2206
  newline-terminated).
2207

2208
  """
2209
  retcode = 1
2210
  obuf = StringIO()
2211
  msg = str(err)
2212
  if isinstance(err, errors.ConfigurationError):
2213
    txt = "Corrupt configuration file: %s" % msg
2214
    logging.error(txt)
2215
    obuf.write(txt + "\n")
2216
    obuf.write("Aborting.")
2217
    retcode = 2
2218
  elif isinstance(err, errors.HooksAbort):
2219
    obuf.write("Failure: hooks execution failed:\n")
2220
    for node, script, out in err.args[0]:
2221
      if out:
2222
        obuf.write("  node: %s, script: %s, output: %s\n" %
2223
                   (node, script, out))
2224
      else:
2225
        obuf.write("  node: %s, script: %s (no output)\n" %
2226
                   (node, script))
2227
  elif isinstance(err, errors.HooksFailure):
2228
    obuf.write("Failure: hooks general failure: %s" % msg)
2229
  elif isinstance(err, errors.ResolverError):
2230
    this_host = netutils.Hostname.GetSysName()
2231
    if err.args[0] == this_host:
2232
      msg = "Failure: can't resolve my own hostname ('%s')"
2233
    else:
2234
      msg = "Failure: can't resolve hostname '%s'"
2235
    obuf.write(msg % err.args[0])
2236
  elif isinstance(err, errors.OpPrereqError):
2237
    if len(err.args) == 2:
2238
      obuf.write("Failure: prerequisites not met for this"
2239
                 " operation:\nerror type: %s, error details:\n%s" %
2240
                 (err.args[1], err.args[0]))
2241
    else:
2242
      obuf.write("Failure: prerequisites not met for this"
2243
                 " operation:\n%s" % msg)
2244
  elif isinstance(err, errors.OpExecError):
2245
    obuf.write("Failure: command execution error:\n%s" % msg)
2246
  elif isinstance(err, errors.TagError):
2247
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2248
  elif isinstance(err, errors.JobQueueDrainError):
2249
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2250
               " accept new requests\n")
2251
  elif isinstance(err, errors.JobQueueFull):
2252
    obuf.write("Failure: the job queue is full and doesn't accept new"
2253
               " job submissions until old jobs are archived\n")
2254
  elif isinstance(err, errors.TypeEnforcementError):
2255
    obuf.write("Parameter Error: %s" % msg)
2256
  elif isinstance(err, errors.ParameterError):
2257
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2258
  elif isinstance(err, luxi.NoMasterError):
2259
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2260
               " and listening for connections?")
2261
  elif isinstance(err, luxi.TimeoutError):
2262
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2263
               " been submitted and will continue to run even if the call"
2264
               " timed out. Useful commands in this situation are \"gnt-job"
2265
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2266
    obuf.write(msg)
2267
  elif isinstance(err, luxi.PermissionError):
2268
    obuf.write("It seems you don't have permissions to connect to the"
2269
               " master daemon.\nPlease retry as a different user.")
2270
  elif isinstance(err, luxi.ProtocolError):
2271
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2272
               "%s" % msg)
2273
  elif isinstance(err, errors.JobLost):
2274
    obuf.write("Error checking job status: %s" % msg)
2275
  elif isinstance(err, errors.QueryFilterParseError):
2276
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2277
    obuf.write("\n".join(err.GetDetails()))
2278
  elif isinstance(err, errors.GenericError):
2279
    obuf.write("Unhandled Ganeti error: %s" % msg)
2280
  elif isinstance(err, JobSubmittedException):
2281
    obuf.write("JobID: %s\n" % err.args[0])
2282
    retcode = 0
2283
  else:
2284
    obuf.write("Unhandled exception: %s" % msg)
2285
  return retcode, obuf.getvalue().rstrip("\n")
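
# Sketch: turning an exception into an exit code and a user-visible message in
# a top-level error handler (GenericMain below uses the same pattern):
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     result, err_msg = FormatError(err)
#     ToStderr(err_msg)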
2286

    
2287

    
2288
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
2303
  if sys.argv:
2304
    binary = os.path.basename(sys.argv[0])
2305
    if not binary:
2306
      binary = sys.argv[0]
2307

    
2308
    if len(sys.argv) >= 2:
2309
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2310
    else:
2311
      logname = binary
2312

    
2313
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2314
  else:
2315
    binary = "<unknown program>"
2316
    cmdline = "<unknown>"
2317

    
2318
  if aliases is None:
2319
    aliases = {}
2320

    
2321
  try:
2322
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2323
                                       env_override)
2324
  except _ShowVersion:
2325
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2326
             constants.RELEASE_VERSION)
2327
    return constants.EXIT_SUCCESS
2328
  except _ShowUsage, err:
2329
    for line in _FormatUsage(binary, commands):
2330
      ToStdout(line)
2331

    
2332
    if err.exit_error:
2333
      return constants.EXIT_FAILURE
2334
    else:
2335
      return constants.EXIT_SUCCESS
2336
  except errors.ParameterError, err:
2337
    result, err_msg = FormatError(err)
2338
    ToStderr(err_msg)
2339
    return 1
2340

    
2341
  if func is None: # parse error
2342
    return 1
2343

    
2344
  if override is not None:
2345
    for key, val in override.iteritems():
2346
      setattr(options, key, val)
2347

    
2348
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2349
                     stderr_logging=True)
2350

    
2351
  logging.info("Command line: %s", cmdline)
2352

    
2353
  try:
2354
    result = func(options, args)
2355
  except (errors.GenericError, luxi.ProtocolError,
2356
          JobSubmittedException), err:
2357
    result, err_msg = FormatError(err)
2358
    logging.exception("Error during command processing")
2359
    ToStderr(err_msg)
2360
  except KeyboardInterrupt:
2361
    result = constants.EXIT_FAILURE
2362
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2363
             " might have been submitted and"
2364
             " will continue to run in the background.")
2365
  except IOError, err:
2366
    if err.errno == errno.EPIPE:
2367
      # our terminal went away, we'll exit
2368
      sys.exit(constants.EXIT_FAILURE)
2369
    else:
2370
      raise
2371

    
2372
  return result
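
# Minimal sketch of a gnt-* style script built on GenericMain; the command
# table entries follow the (function, arguments, options, usage, description)
# layout unpacked by _ParseArgs (the function and command names here are
# illustrative):
#
#   commands = {
#     "info": (ShowInfo, [ArgInstance(min=1, max=1)], [PRIORITY_OPT],
#              "<instance>", "Show information about an instance"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))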
2373

    
2374

    
2375
def ParseNicOption(optvalue):
2376
  """Parses the value of the --net option(s).
2377

2378
  """
2379
  try:
2380
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2381
  except (TypeError, ValueError), err:
2382
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2383
                               errors.ECODE_INVAL)
2384

    
2385
  nics = [{}] * nic_max
2386
  for nidx, ndict in optvalue:
2387
    nidx = int(nidx)
2388

    
2389
    if not isinstance(ndict, dict):
2390
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2391
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2392

    
2393
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2394

    
2395
    nics[nidx] = ndict
2396

    
2397
  return nics
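
# Illustrative example (NIC parameter names are assumptions): the option value
# is a list of (index, dict) pairs as produced by the "--net" option, and the
# result is a dense list indexed by NIC number:
#
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#     -> [{"link": "br0"}, {"mode": "routed"}]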
2398

    
2399

    
2400
def GenericInstanceCreate(mode, opts, args):
2401
  """Add an instance to the cluster via either creation or import.
2402

2403
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2404
  @param opts: the command line options selected by the user
2405
  @type args: list
2406
  @param args: should contain only one element, the new instance name
2407
  @rtype: int
2408
  @return: the desired exit code
2409

2410
  """
2411
  instance = args[0]
2412

    
2413
  (pnode, snode) = SplitNodeOption(opts.node)
2414

    
2415
  hypervisor = None
2416
  hvparams = {}
2417
  if opts.hypervisor:
2418
    hypervisor, hvparams = opts.hypervisor
2419

    
2420
  if opts.nics:
2421
    nics = ParseNicOption(opts.nics)
2422
  elif opts.no_nics:
2423
    # no nics
2424
    nics = []
2425
  elif mode == constants.INSTANCE_CREATE:
2426
    # default of one nic, all auto
2427
    nics = [{}]
2428
  else:
2429
    # mode == import
2430
    nics = []
2431

    
2432
  if opts.disk_template == constants.DT_DISKLESS:
2433
    if opts.disks or opts.sd_size is not None:
2434
      raise errors.OpPrereqError("Diskless instance but disk"
2435
                                 " information passed", errors.ECODE_INVAL)
2436
    disks = []
2437
  else:
2438
    if (not opts.disks and not opts.sd_size
2439
        and mode == constants.INSTANCE_CREATE):
2440
      raise errors.OpPrereqError("No disk information specified",
2441
                                 errors.ECODE_INVAL)
2442
    if opts.disks and opts.sd_size is not None:
2443
      raise errors.OpPrereqError("Please use either the '--disk' or"
2444
                                 " '-s' option", errors.ECODE_INVAL)
2445
    if opts.sd_size is not None:
2446
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2447

    
2448
    if opts.disks:
2449
      try:
2450
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2451
      except ValueError, err:
2452
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2453
                                   errors.ECODE_INVAL)
2454
      disks = [{}] * disk_max
2455
    else:
2456
      disks = []
2457
    for didx, ddict in opts.disks:
2458
      didx = int(didx)
2459
      if not isinstance(ddict, dict):
2460
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2461
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2462
      elif constants.IDISK_SIZE in ddict:
2463
        if constants.IDISK_ADOPT in ddict:
2464
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2465
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2466
        try:
2467
          ddict[constants.IDISK_SIZE] = \
2468
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2469
        except ValueError, err:
2470
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2471
                                     (didx, err), errors.ECODE_INVAL)
2472
      elif constants.IDISK_ADOPT in ddict:
2473
        if mode == constants.INSTANCE_IMPORT:
2474
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2475
                                     " import", errors.ECODE_INVAL)
2476
        ddict[constants.IDISK_SIZE] = 0
2477
      else:
2478
        raise errors.OpPrereqError("Missing size or adoption source for"
2479
                                   " disk %d" % didx, errors.ECODE_INVAL)
2480
      disks[didx] = ddict
2481

    
2482
  if opts.tags is not None:
2483
    tags = opts.tags.split(",")
2484
  else:
2485
    tags = []
2486

    
2487
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2488
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2489

    
2490
  if mode == constants.INSTANCE_CREATE:
2491
    start = opts.start
2492
    os_type = opts.os
2493
    force_variant = opts.force_variant
2494
    src_node = None
2495
    src_path = None
2496
    no_install = opts.no_install
2497
    identify_defaults = False
2498
  elif mode == constants.INSTANCE_IMPORT:
2499
    start = False
2500
    os_type = None
2501
    force_variant = False
2502
    src_node = opts.src_node
2503
    src_path = opts.src_dir
2504
    no_install = None
2505
    identify_defaults = opts.identify_defaults
2506
  else:
2507
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2508

    
2509
  op = opcodes.OpInstanceCreate(instance_name=instance,
2510
                                disks=disks,
2511
                                disk_template=opts.disk_template,
2512
                                nics=nics,
2513
                                conflicts_check=opts.conflicts_check,
2514
                                pnode=pnode, snode=snode,
2515
                                ip_check=opts.ip_check,
2516
                                name_check=opts.name_check,
2517
                                wait_for_sync=opts.wait_for_sync,
2518
                                file_storage_dir=opts.file_storage_dir,
2519
                                file_driver=opts.file_driver,
2520
                                iallocator=opts.iallocator,
2521
                                hypervisor=hypervisor,
2522
                                hvparams=hvparams,
2523
                                beparams=opts.beparams,
2524
                                osparams=opts.osparams,
2525
                                mode=mode,
2526
                                start=start,
2527
                                os_type=os_type,
2528
                                force_variant=force_variant,
2529
                                src_node=src_node,
2530
                                src_path=src_path,
2531
                                tags=tags,
2532
                                no_install=no_install,
2533
                                identify_defaults=identify_defaults,
2534
                                ignore_ipolicy=opts.ignore_ipolicy)
2535

    
2536
  SubmitOrSend(op, opts)
2537
  return 0
2538

    
2539

    
2540
class _RunWhileClusterStoppedHelper:
2541
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2542

2543
  """
2544
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
2558
    self.cluster_name = cluster_name
2559
    self.master_node = master_node
2560
    self.online_nodes = online_nodes
2561

    
2562
    self.ssh = ssh.SshRunner(self.cluster_name)
2563

    
2564
    self.nonmaster_nodes = [name for name in online_nodes
2565
                            if name != master_node]
2566

    
2567
    assert self.master_node not in self.nonmaster_nodes
2568

    
2569
  def _RunCmd(self, node_name, cmd):
2570
    """Runs a command on the local or a remote machine.
2571

2572
    @type node_name: string
2573
    @param node_name: Machine name
2574
    @type cmd: list
2575
    @param cmd: Command
2576

2577
    """
2578
    if node_name is None or node_name == self.master_node:
2579
      # No need to use SSH
2580
      result = utils.RunCmd(cmd)
2581
    else:
2582
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2583
                            utils.ShellQuoteArgs(cmd))
2584

    
2585
    if result.failed:
2586
      errmsg = ["Failed to run command %s" % result.cmd]
2587
      if node_name:
2588
        errmsg.append("on node %s" % node_name)
2589
      errmsg.append(": exitcode %s and error %s" %
2590
                    (result.exit_code, result.output))
2591
      raise errors.OpExecError(" ".join(errmsg))
2592

    
2593
  def Call(self, fn, *args):
2594
    """Call function while all daemons are stopped.
2595

2596
    @type fn: callable
2597
    @param fn: Function to be called
2598

2599
    """
2600
    # Pause watcher by acquiring an exclusive lock on watcher state file
2601
    self.feedback_fn("Blocking watcher")
2602
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2603
    try:
2604
      # TODO: Currently, this just blocks. There's no timeout.
2605
      # TODO: Should it be a shared lock?
2606
      watcher_block.Exclusive(blocking=True)
2607

    
2608
      # Stop master daemons, so that no new jobs can come in and all running
2609
      # ones are finished
2610
      self.feedback_fn("Stopping master daemons")
2611
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2612
      try:
2613
        # Stop daemons on all nodes
2614
        for node_name in self.online_nodes:
2615
          self.feedback_fn("Stopping daemons on %s" % node_name)
2616
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2617

    
2618
        # All daemons are shut down now
2619
        try:
2620
          return fn(self, *args)
2621
        except Exception, err:
2622
          _, errmsg = FormatError(err)
2623
          logging.exception("Caught exception")
2624
          self.feedback_fn(errmsg)
2625
          raise
2626
      finally:
2627
        # Start cluster again, master node last
2628
        for node_name in self.nonmaster_nodes + [self.master_node]:
2629
          self.feedback_fn("Starting daemons on %s" % node_name)
2630
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2631
    finally:
2632
      # Resume watcher
2633
      watcher_block.Close()
2634

    
2635

    
2636
def RunWhileClusterStopped(feedback_fn, fn, *args):
2637
  """Calls a function while all cluster daemons are stopped.
2638

2639
  @type feedback_fn: callable
2640
  @param feedback_fn: Feedback function
2641
  @type fn: callable
2642
  @param fn: Function to be called when daemons are stopped
2643

2644
  """
2645
  feedback_fn("Gathering cluster information")
2646

    
2647
  # This ensures we're running on the master daemon
2648
  cl = GetClient()
2649

    
2650
  (cluster_name, master_node) = \
2651
    cl.QueryConfigValues(["cluster_name", "master_node"])
2652

    
2653
  online_nodes = GetOnlineNodes([], cl=cl)
2654

    
2655
  # Don't keep a reference to the client. The master daemon will go away.
2656
  del cl
2657

    
2658
  assert master_node in online_nodes
2659

    
2660
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2661
                                       online_nodes).Call(fn, *args)
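
# Sketch of usage (the callback name is made up): the called function receives
# the helper object as its first argument and runs while all daemons are down:
#
#   def _DoOfflineWork(helper):
#     helper.feedback_fn("Working offline on %s" % helper.master_node)
#
#   RunWhileClusterStopped(ToStdout, _DoOfflineWork)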
2662

    
2663

    
2664
def GenerateTable(headers, fields, separator, data,
2665
                  numfields=None, unitfields=None,
2666
                  units=None):
2667
  """Prints a table with headers and different fields.
2668

2669
  @type headers: dict
2670
  @param headers: dictionary mapping field names to headers for
2671
      the table
2672
  @type fields: list
2673
  @param fields: the field names corresponding to each row in
2674
      the data field
2675
  @param separator: the separator to be used; if this is None,
2676
      the default 'smart' algorithm is used which computes optimal
2677
      field width, otherwise just the separator is used between
2678
      each field
2679
  @type data: list
2680
  @param data: a list of lists, each sublist being one row to be output
2681
  @type numfields: list
2682
  @param numfields: a list with the fields that hold numeric
2683
      values and thus should be right-aligned
2684
  @type unitfields: list
2685
  @param unitfields: a list with the fields that hold numeric
2686
      values that should be formatted with the units field
2687
  @type units: string or None
2688
  @param units: the units we should use for formatting, or None for
2689
      automatic choice (human-readable for non-separator usage, otherwise
2690
      megabytes); this is a one-letter string
2691

2692
  """
2693
  if units is None:
2694
    if separator:
2695
      units = "m"
2696
    else:
2697
      units = "h"
2698

    
2699
  if numfields is None:
2700
    numfields = []
2701
  if unitfields is None:
2702
    unitfields = []
2703

    
2704
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2705
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2706

    
2707
  format_fields = []
2708
  for field in fields:
2709
    if headers and field not in headers:
2710
      # TODO: handle better unknown fields (either revert to old
2711
      # style of raising exception, or deal more intelligently with
2712
      # variable fields)
2713
      headers[field] = field
2714
    if separator is not None:
2715
      format_fields.append("%s")
2716
    elif numfields.Matches(field):
2717
      format_fields.append("%*s")
2718
    else:
2719
      format_fields.append("%-*s")
2720

    
2721
  if separator is None:
2722
    mlens = [0 for name in fields]
2723
    format_str = " ".join(format_fields)
2724
  else:
2725
    format_str = separator.replace("%", "%%").join(format_fields)
2726

    
2727
  for row in data:
2728
    if row is None:
2729
      continue
2730
    for idx, val in enumerate(row):
2731
      if unitfields.Matches(fields[idx]):
2732
        try:
2733
          val = int(val)
2734
        except (TypeError, ValueError):
2735
          pass
2736
        else:
2737
          val = row[idx] = utils.FormatUnit(val, units)
2738
      val = row[idx] = str(val)
2739
      if separator is None:
2740
        mlens[idx] = max(mlens[idx], len(val))
2741

    
2742
  result = []
2743
  if headers:
2744
    args = []
2745
    for idx, name in enumerate(fields):
2746
      hdr = headers[name]
2747
      if separator is None:
2748
        mlens[idx] = max(mlens[idx], len(hdr))
2749
        args.append(mlens[idx])
2750
      args.append(hdr)
2751
    result.append(format_str % tuple(args))
2752

    
2753
  if separator is None:
2754
    assert len(mlens) == len(fields)
2755

    
2756
    if fields and not numfields.Matches(fields[-1]):
2757
      mlens[-1] = 0
2758

    
2759
  for line in data:
2760
    args = []
2761
    if line is None:
2762
      line = ["-" for _ in fields]
2763
    for idx in range(len(fields)):
2764
      if separator is None:
2765
        args.append(mlens[idx])
2766
      args.append(line[idx])
2767
    result.append(format_str % tuple(args))
2768

    
2769
  return result
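
# Illustrative call (headers, fields and data are made up); the return value
# is a list of already-formatted text lines, header line first:
#
#   for line in GenerateTable({"name": "Name", "size": "Size"},
#                             ["name", "size"], None,
#                             [["disk0", 1024], ["disk1", 2048]],
#                             numfields=["size"], unitfields=["size"]):
#     ToStdout(line)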
2770

    
2771

    
2772
def _FormatBool(value):
2773
  """Formats a boolean value as a string.
2774

2775
  """
2776
  if value:
2777
    return "Y"
2778
  return "N"
2779

    
2780

    
2781
#: Default formatting for query results; (callback, align right)
2782
_DEFAULT_FORMAT_QUERY = {
2783
  constants.QFT_TEXT: (str, False),
2784
  constants.QFT_BOOL: (_FormatBool, False),
2785
  constants.QFT_NUMBER: (str, True),
2786
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2787
  constants.QFT_OTHER: (str, False),
2788
  constants.QFT_UNKNOWN: (str, False),
2789
  }
2790

    
2791

    
2792
def _GetColumnFormatter(fdef, override, unit):
2793
  """Returns formatting function for a field.
2794

2795
  @type fdef: L{objects.QueryFieldDefinition}
2796
  @type override: dict
2797
  @param override: Dictionary for overriding field formatting functions,
2798
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2799
  @type unit: string
2800
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2801
  @rtype: tuple; (callable, bool)
2802
  @return: Returns the function to format a value (takes one parameter) and a
2803
    boolean for aligning the value on the right-hand side
2804

2805
  """
2806
  fmt = override.get(fdef.name, None)
2807
  if fmt is not None:
2808
    return fmt
2809

    
2810
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2811

    
2812
  if fdef.kind == constants.QFT_UNIT:
2813
    # Can't keep this information in the static dictionary
2814
    return (lambda value: utils.FormatUnit(value, unit), True)
2815

    
2816
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2817
  if fmt is not None:
2818
    return fmt
2819

    
2820
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
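
# Hedged usage sketch (illustrative only): given a response obtained from a
# luxi client's Query() call, the typical pattern is:
#   (status, lines) = FormatQueryResult(response, unit="h", header=True)
#   for line in lines:
#     ToStdout(line)
# where status is one of QR_NORMAL, QR_INCOMPLETE or QR_UNKNOWN.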


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)
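
# Illustrative examples of the generated format strings:
#   >>> _GetColFormatString(10, False)
#   '%-10s'
#   >>> _GetColFormatString(10, True)
#   '%10s'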


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
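
# Hedged example (doctest-style sketch, not executed on import):
#   >>> cols = [TableColumn("Name", str, False), TableColumn("Size", str, True)]
#   >>> FormatTable([["disk0", "512"], ["disk1", "1024"]], cols, True, None)
#   ['Name  Size', 'disk0  512', 'disk1 1024']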


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
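
# Illustrative examples (results are in seconds):
#   >>> ParseTimespec("30")
#   30
#   >>> ParseTimespec("2h")
#   7200
#   >>> ParseTimespec("1w")
#   604800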


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are being skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
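
# Hedged usage sketch (illustrative only; "op1"/"op2" stand for opcode
# instances built by the caller):
#   je = JobExecutor(cl=cl, opts=opts)
#   je.QueueJob("instance1", op1)
#   je.QueueJob("instance2", op2)
#   results = je.GetResults()     # list of (success, result) tuples
# Use je.WaitOrShow(False) instead to only print the submitted job IDs.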


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
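
# Hedged doctest-style sketch: only "cpu" falls back to its default value.
#   >>> buf = StringIO()
#   >>> FormatParameterDict(buf, {"mem": 512}, {"mem": 512, "cpu": 1})
#   >>> print buf.getvalue()
#     - cpu: default (1)
#     - mem: 512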


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
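
# Illustrative sketch, assuming utils.ParseUnit's MiB-based parsing: the
# literal "default" value is kept as-is, anything else is converted, e.g.
# {"min": "1G", "max": "default"} would become {"min": 1024, "max": "default"}.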


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ipolicy_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES

  for specs in ipolicy_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ipolicy_out = objects.MakeEmptyIPolicy()
  for name, specs in ipolicy_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      ipolicy_out[key][name] = val

  # no filldict for non-dicts
  if not group_ipolicy and fill_all:
    if ipolicy_disk_templates is None:
      ipolicy_disk_templates = constants.DISK_TEMPLATES
    if ipolicy_vcpu_ratio is None:
      ipolicy_vcpu_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
    if ipolicy_spindle_ratio is None:
      ipolicy_spindle_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  return ipolicy_out