#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
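
# Illustrative sketch, not part of the original module: client scripts (the
# gnt-* tools) build a "commands" dictionary mapping each command name to a
# 5-tuple of (handler function, argument definition, options, usage string,
# description), which is exactly what _ParseArgs() below unpacks. A
# hypothetical entry using the definitions above could look like:
#
#   commands = {
#     "info": (ShowInstanceInfo, ARGS_MANY_INSTANCES, [STATIC_OPT],
#              "[-s] [<instance>...]",
#              "Show information about one or more instances"),
#     }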


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParse's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
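
# Example (illustrative): an option declared with type="unit" goes through this
# converter, so both plain and suffixed values are accepted on the command
# line; assuming the usual semantics of utils.ParseUnit, "512" and "4G" would
# both be converted to an integer number of mebibytes (512 and 4096).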


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
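
# Example (illustrative, follows directly from the code above): for an option
# named "-H", _SplitKeyVal("-H", "kernel_path=/boot/vmlinuz,no_acpi,-root_path")
# returns {"kernel_path": "/boot/vmlinuz", "acpi": False, "root_path": None}.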


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
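
# Example (illustrative): for a value such as "0:size=10240,mode=rw" this
# returns ("0", {"size": "10240", "mode": "rw"}), while a bare "no_link0"
# (no colon, "no_" prefix) returns ("link0", False).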


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance"
                            " NICs, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. "add" and "import" then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
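
# Worked example (illustrative): for a definition of [ArgNode(min=1, max=1)]
# (i.e. ARGS_ONE_NODE), passing no arguments fails the exact-count check and
# passing two arguments fails it as well; only exactly one node name is
# accepted.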


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
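
# Example (illustrative): "node1.example.com:node2.example.com" is split into
# ["node1.example.com", "node2.example.com"], while "node1.example.com" alone
# yields ("node1.example.com", None).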
1370

    
1371

    
1372
def CalculateOSNames(os_name, os_variants):
1373
  """Calculates all the names an OS can be called, according to its variants.
1374

1375
  @type os_name: string
1376
  @param os_name: base name of the os
1377
  @type os_variants: list or None
1378
  @param os_variants: list of supported variants
1379
  @rtype: list
1380
  @return: list of valid names
1381

1382
  """
1383
  if os_variants:
1384
    return ['%s+%s' % (os_name, v) for v in os_variants]
1385
  else:
1386
    return [os_name]
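

# Illustrative sketch, not part of the original module: the variant expansion
# done by CalculateOSNames ("debootstrap" is just an example OS name).
def _ExampleCalculateOSNames():
  assert CalculateOSNames("debootstrap", ["wheezy", "squeeze"]) == \
    ["debootstrap+wheezy", "debootstrap+squeeze"]
  assert CalculateOSNames("debootstrap", None) == ["debootstrap"]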
1387

    
1388

    
1389
def ParseFields(selected, default):
1390
  """Parses the values of "--field"-like options.
1391

1392
  @type selected: string or None
1393
  @param selected: User-selected options
1394
  @type default: list
1395
  @param default: Default fields
1396

1397
  """
1398
  if selected is None:
1399
    return default
1400

    
1401
  if selected.startswith("+"):
1402
    return default + selected[1:].split(",")
1403

    
1404
  return selected.split(",")
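

# Illustrative sketch, not part of the original module: the three ways
# ParseFields combines a "--output"-style value with the default field list.
def _ExampleParseFields():
  assert ParseFields(None, ["name", "status"]) == ["name", "status"]
  assert ParseFields("+oper_ram", ["name", "status"]) == \
    ["name", "status", "oper_ram"]
  assert ParseFields("name,ip", ["name", "status"]) == ["name", "ip"]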
1405

    
1406

    
1407
UsesRPC = rpc.RunWithRPC
1408

    
1409

    
1410
def AskUser(text, choices=None):
1411
  """Ask the user a question.
1412

1413
  @param text: the question to ask
1414

1415
  @param choices: list with elements tuples (input_char, return_value,
1416
      description); if not given, it will default to: [('y', True,
1417
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1418
      note that the '?' char is reserved for help
1419

1420
  @return: one of the return values from the choices list; if input is
1421
      not possible (i.e. not running with a tty), we return the last
1422
      entry from the list
1423

1424
  """
1425
  if choices is None:
1426
    choices = [('y', True, 'Perform the operation'),
1427
               ('n', False, 'Do not perform the operation')]
1428
  if not choices or not isinstance(choices, list):
1429
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1430
  for entry in choices:
1431
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1432
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1433

    
1434
  answer = choices[-1][1]
1435
  new_text = []
1436
  for line in text.splitlines():
1437
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1438
  text = "\n".join(new_text)
1439
  try:
1440
    f = file("/dev/tty", "a+")
1441
  except IOError:
1442
    return answer
1443
  try:
1444
    chars = [entry[0] for entry in choices]
1445
    chars[-1] = "[%s]" % chars[-1]
1446
    chars.append('?')
1447
    maps = dict([(entry[0], entry[1]) for entry in choices])
1448
    while True:
1449
      f.write(text)
1450
      f.write('\n')
1451
      f.write("/".join(chars))
1452
      f.write(": ")
1453
      line = f.readline(2).strip().lower()
1454
      if line in maps:
1455
        answer = maps[line]
1456
        break
1457
      elif line == '?':
1458
        for entry in choices:
1459
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1460
        f.write("\n")
1461
        continue
1462
  finally:
1463
    f.close()
1464
  return answer
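

# Illustrative usage sketch, not part of the original module: with the default
# choices, typing 'y' answers True and 'n' answers False; without a
# controlling tty the last (default) choice, False, is returned:
#
#   if not AskUser("Destroy instance inst1.example.com?"):
#     return 1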
1465

    
1466

    
1467
class JobSubmittedException(Exception):
1468
  """Job was submitted, client should exit.
1469

1470
  This exception has one argument, the ID of the job that was
1471
  submitted. The handler should print this ID.
1472

1473
  This is not an error, just a structured way to exit from clients.
1474

1475
  """
1476

    
1477

    
1478
def SendJob(ops, cl=None):
1479
  """Function to submit an opcode without waiting for the results.
1480

1481
  @type ops: list
1482
  @param ops: list of opcodes
1483
  @type cl: luxi.Client
1484
  @param cl: the luxi client to use for communicating with the master;
1485
             if None, a new client will be created
1486

1487
  """
1488
  if cl is None:
1489
    cl = GetClient()
1490

    
1491
  job_id = cl.SubmitJob(ops)
1492

    
1493
  return job_id
1494

    
1495

    
1496
def GenericPollJob(job_id, cbs, report_cbs):
1497
  """Generic job-polling function.
1498

1499
  @type job_id: number
1500
  @param job_id: Job ID
1501
  @type cbs: Instance of L{JobPollCbBase}
1502
  @param cbs: Data callbacks
1503
  @type report_cbs: Instance of L{JobPollReportCbBase}
1504
  @param report_cbs: Reporting callbacks
1505

1506
  """
1507
  prev_job_info = None
1508
  prev_logmsg_serial = None
1509

    
1510
  status = None
1511

    
1512
  while True:
1513
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1514
                                      prev_logmsg_serial)
1515
    if not result:
1516
      # job not found, go away!
1517
      raise errors.JobLost("Job with id %s lost" % job_id)
1518

    
1519
    if result == constants.JOB_NOTCHANGED:
1520
      report_cbs.ReportNotChanged(job_id, status)
1521

    
1522
      # Wait again
1523
      continue
1524

    
1525
    # Split result, a tuple of (field values, log entries)
1526
    (job_info, log_entries) = result
1527
    (status, ) = job_info
1528

    
1529
    if log_entries:
1530
      for log_entry in log_entries:
1531
        (serial, timestamp, log_type, message) = log_entry
1532
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1533
                                    log_type, message)
1534
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1535

    
1536
    # TODO: Handle canceled and archived jobs
1537
    elif status in (constants.JOB_STATUS_SUCCESS,
1538
                    constants.JOB_STATUS_ERROR,
1539
                    constants.JOB_STATUS_CANCELING,
1540
                    constants.JOB_STATUS_CANCELED):
1541
      break
1542

    
1543
    prev_job_info = job_info
1544

    
1545
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1546
  if not jobs:
1547
    raise errors.JobLost("Job with id %s lost" % job_id)
1548

    
1549
  status, opstatus, result = jobs[0]
1550

    
1551
  if status == constants.JOB_STATUS_SUCCESS:
1552
    return result
1553

    
1554
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1555
    raise errors.OpExecError("Job was canceled")
1556

    
1557
  has_ok = False
1558
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1559
    if status == constants.OP_STATUS_SUCCESS:
1560
      has_ok = True
1561
    elif status == constants.OP_STATUS_ERROR:
1562
      errors.MaybeRaise(msg)
1563

    
1564
      if has_ok:
1565
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1566
                                 (idx, msg))
1567

    
1568
      raise errors.OpExecError(str(msg))
1569

    
1570
  # default failure mode
1571
  raise errors.OpExecError(result)
1572

    
1573

    
1574
class JobPollCbBase:
1575
  """Base class for L{GenericPollJob} callbacks.
1576

1577
  """
1578
  def __init__(self):
1579
    """Initializes this class.
1580

1581
    """
1582

    
1583
  def WaitForJobChangeOnce(self, job_id, fields,
1584
                           prev_job_info, prev_log_serial):
1585
    """Waits for changes on a job.
1586

1587
    """
1588
    raise NotImplementedError()
1589

    
1590
  def QueryJobs(self, job_ids, fields):
1591
    """Returns the selected fields for the selected job IDs.
1592

1593
    @type job_ids: list of numbers
1594
    @param job_ids: Job IDs
1595
    @type fields: list of strings
1596
    @param fields: Fields
1597

1598
    """
1599
    raise NotImplementedError()
1600

    
1601

    
1602
class JobPollReportCbBase:
1603
  """Base class for L{GenericPollJob} reporting callbacks.
1604

1605
  """
1606
  def __init__(self):
1607
    """Initializes this class.
1608

1609
    """
1610

    
1611
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1612
    """Handles a log message.
1613

1614
    """
1615
    raise NotImplementedError()
1616

    
1617
  def ReportNotChanged(self, job_id, status):
1618
    """Called for if a job hasn't changed in a while.
1619

1620
    @type job_id: number
1621
    @param job_id: Job ID
1622
    @type status: string or None
1623
    @param status: Job status if available
1624

1625
    """
1626
    raise NotImplementedError()
1627

    
1628

    
1629
class _LuxiJobPollCb(JobPollCbBase):
1630
  def __init__(self, cl):
1631
    """Initializes this class.
1632

1633
    """
1634
    JobPollCbBase.__init__(self)
1635
    self.cl = cl
1636

    
1637
  def WaitForJobChangeOnce(self, job_id, fields,
1638
                           prev_job_info, prev_log_serial):
1639
    """Waits for changes on a job.
1640

1641
    """
1642
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1643
                                        prev_job_info, prev_log_serial)
1644

    
1645
  def QueryJobs(self, job_ids, fields):
1646
    """Returns the selected fields for the selected job IDs.
1647

1648
    """
1649
    return self.cl.QueryJobs(job_ids, fields)
1650

    
1651

    
1652
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1653
  def __init__(self, feedback_fn):
1654
    """Initializes this class.
1655

1656
    """
1657
    JobPollReportCbBase.__init__(self)
1658

    
1659
    self.feedback_fn = feedback_fn
1660

    
1661
    assert callable(feedback_fn)
1662

    
1663
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1664
    """Handles a log message.
1665

1666
    """
1667
    self.feedback_fn((timestamp, log_type, log_msg))
1668

    
1669
  def ReportNotChanged(self, job_id, status):
1670
    """Called if a job hasn't changed in a while.
1671

1672
    """
1673
    # Ignore
1674

    
1675

    
1676
class StdioJobPollReportCb(JobPollReportCbBase):
1677
  def __init__(self):
1678
    """Initializes this class.
1679

1680
    """
1681
    JobPollReportCbBase.__init__(self)
1682

    
1683
    self.notified_queued = False
1684
    self.notified_waitlock = False
1685

    
1686
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1687
    """Handles a log message.
1688

1689
    """
1690
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1691
             FormatLogMessage(log_type, log_msg))
1692

    
1693
  def ReportNotChanged(self, job_id, status):
1694
    """Called if a job hasn't changed in a while.
1695

1696
    """
1697
    if status is None:
1698
      return
1699

    
1700
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1701
      ToStderr("Job %s is waiting in queue", job_id)
1702
      self.notified_queued = True
1703

    
1704
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1705
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1706
      self.notified_waitlock = True
1707

    
1708

    
1709
def FormatLogMessage(log_type, log_msg):
1710
  """Formats a job message according to its type.
1711

1712
  """
1713
  if log_type != constants.ELOG_MESSAGE:
1714
    log_msg = str(log_msg)
1715

    
1716
  return utils.SafeEncode(log_msg)
1717

    
1718

    
1719
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1720
  """Function to poll for the result of a job.
1721

1722
  @type job_id: job identifier
1723
  @param job_id: the job to poll for results
1724
  @type cl: luxi.Client
1725
  @param cl: the luxi client to use for communicating with the master;
1726
             if None, a new client will be created
1727

1728
  """
1729
  if cl is None:
1730
    cl = GetClient()
1731

    
1732
  if reporter is None:
1733
    if feedback_fn:
1734
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1735
    else:
1736
      reporter = StdioJobPollReportCb()
1737
  elif feedback_fn:
1738
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1739

    
1740
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1741

    
1742

    
1743
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1744
  """Legacy function to submit an opcode.
1745

1746
  This is just a simple wrapper over the construction of the processor
1747
  instance. It should be extended to better handle feedback and
1748
  interaction functions.
1749

1750
  """
1751
  if cl is None:
1752
    cl = GetClient()
1753

    
1754
  SetGenericOpcodeOpts([op], opts)
1755

    
1756
  job_id = SendJob([op], cl=cl)
1757

    
1758
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1759
                       reporter=reporter)
1760

    
1761
  return op_results[0]
1762

    
1763

    
1764
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1765
  """Wrapper around SubmitOpCode or SendJob.
1766

1767
  This function will decide, based on the 'opts' parameter, whether to
1768
  submit and wait for the result of the opcode (and return it), or
1769
  whether to just send the job and print its identifier. It is used in
1770
  order to simplify the implementation of the '--submit' option.
1771

1772
  It will also process the opcodes if we're sending them via SendJob
1773
  (otherwise SubmitOpCode does it).
1774

1775
  """
1776
  if opts and opts.submit_only:
1777
    job = [op]
1778
    SetGenericOpcodeOpts(job, opts)
1779
    job_id = SendJob(job, cl=cl)
1780
    raise JobSubmittedException(job_id)
1781
  else:
1782
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
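

# Illustrative usage sketch, not part of the original module; ExampleCommand
# and the opcode construction are hypothetical:
#
#   def ExampleCommand(opts, args):
#     op = ...  # build an opcode from opts/args
#     SubmitOrSend(op, opts)  # waits for the job, or raises
#                             # JobSubmittedException when --submit is given
#     return 0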
1783

    
1784

    
1785
def SetGenericOpcodeOpts(opcode_list, options):
1786
  """Processor for generic options.
1787

1788
  This function updates the given opcodes based on generic command
1789
  line options (like debug, dry-run, etc.).
1790

1791
  @param opcode_list: list of opcodes
1792
  @param options: command line options or None
1793
  @return: None (in-place modification)
1794

1795
  """
1796
  if not options:
1797
    return
1798
  for op in opcode_list:
1799
    op.debug_level = options.debug
1800
    if hasattr(options, "dry_run"):
1801
      op.dry_run = options.dry_run
1802
    if getattr(options, "priority", None) is not None:
1803
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1804

    
1805

    
1806
def GetClient():
1807
  # TODO: Cache object?
1808
  try:
1809
    client = luxi.Client()
1810
  except luxi.NoMasterError:
1811
    ss = ssconf.SimpleStore()
1812

    
1813
    # Try to read ssconf file
1814
    try:
1815
      ss.GetMasterNode()
1816
    except errors.ConfigurationError:
1817
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1818
                                 " not part of a cluster")
1819

    
1820
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1821
    if master != myself:
1822
      raise errors.OpPrereqError("This is not the master node, please connect"
1823
                                 " to node '%s' and rerun the command" %
1824
                                 master)
1825
    raise
1826
  return client
1827

    
1828

    
1829
def FormatError(err):
1830
  """Return a formatted error message for a given error.
1831

1832
  This function takes an exception instance and returns a tuple
1833
  consisting of two values: first, the recommended exit code, and
1834
  second, a string describing the error message (not
1835
  newline-terminated).
1836

1837
  """
1838
  retcode = 1
1839
  obuf = StringIO()
1840
  msg = str(err)
1841
  if isinstance(err, errors.ConfigurationError):
1842
    txt = "Corrupt configuration file: %s" % msg
1843
    logging.error(txt)
1844
    obuf.write(txt + "\n")
1845
    obuf.write("Aborting.")
1846
    retcode = 2
1847
  elif isinstance(err, errors.HooksAbort):
1848
    obuf.write("Failure: hooks execution failed:\n")
1849
    for node, script, out in err.args[0]:
1850
      if out:
1851
        obuf.write("  node: %s, script: %s, output: %s\n" %
1852
                   (node, script, out))
1853
      else:
1854
        obuf.write("  node: %s, script: %s (no output)\n" %
1855
                   (node, script))
1856
  elif isinstance(err, errors.HooksFailure):
1857
    obuf.write("Failure: hooks general failure: %s" % msg)
1858
  elif isinstance(err, errors.ResolverError):
1859
    this_host = netutils.Hostname.GetSysName()
1860
    if err.args[0] == this_host:
1861
      msg = "Failure: can't resolve my own hostname ('%s')"
1862
    else:
1863
      msg = "Failure: can't resolve hostname '%s'"
1864
    obuf.write(msg % err.args[0])
1865
  elif isinstance(err, errors.OpPrereqError):
1866
    if len(err.args) == 2:
1867
      obuf.write("Failure: prerequisites not met for this"
1868
               " operation:\nerror type: %s, error details:\n%s" %
1869
                 (err.args[1], err.args[0]))
1870
    else:
1871
      obuf.write("Failure: prerequisites not met for this"
1872
                 " operation:\n%s" % msg)
1873
  elif isinstance(err, errors.OpExecError):
1874
    obuf.write("Failure: command execution error:\n%s" % msg)
1875
  elif isinstance(err, errors.TagError):
1876
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1877
  elif isinstance(err, errors.JobQueueDrainError):
1878
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1879
               " accept new requests\n")
1880
  elif isinstance(err, errors.JobQueueFull):
1881
    obuf.write("Failure: the job queue is full and doesn't accept new"
1882
               " job submissions until old jobs are archived\n")
1883
  elif isinstance(err, errors.TypeEnforcementError):
1884
    obuf.write("Parameter Error: %s" % msg)
1885
  elif isinstance(err, errors.ParameterError):
1886
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1887
  elif isinstance(err, luxi.NoMasterError):
1888
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1889
               " and listening for connections?")
1890
  elif isinstance(err, luxi.TimeoutError):
1891
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1892
               " been submitted and will continue to run even if the call"
1893
               " timed out. Useful commands in this situation are \"gnt-job"
1894
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1895
    obuf.write(msg)
1896
  elif isinstance(err, luxi.PermissionError):
1897
    obuf.write("It seems you don't have permissions to connect to the"
1898
               " master daemon.\nPlease retry as a different user.")
1899
  elif isinstance(err, luxi.ProtocolError):
1900
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1901
               "%s" % msg)
1902
  elif isinstance(err, errors.JobLost):
1903
    obuf.write("Error checking job status: %s" % msg)
1904
  elif isinstance(err, errors.QueryFilterParseError):
1905
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1906
    obuf.write("\n".join(err.GetDetails()))
1907
  elif isinstance(err, errors.GenericError):
1908
    obuf.write("Unhandled Ganeti error: %s" % msg)
1909
  elif isinstance(err, JobSubmittedException):
1910
    obuf.write("JobID: %s\n" % err.args[0])
1911
    retcode = 0
1912
  else:
1913
    obuf.write("Unhandled exception: %s" % msg)
1914
  return retcode, obuf.getvalue().rstrip('\n')
1915

    
1916

    
1917
def GenericMain(commands, override=None, aliases=None):
1918
  """Generic main function for all the gnt-* commands.
1919

1920
  Arguments:
1921
    - commands: a dictionary with a special structure, see the design doc
1922
                for command line handling.
1923
    - override: if not None, we expect a dictionary with keys that will
1924
                override command line options; this can be used to pass
1925
                options from the scripts to generic functions
1926
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1927

1928
  """
1929
  # save the program name and the entire command line for later logging
1930
  if sys.argv:
1931
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1932
    if len(sys.argv) >= 2:
1933
      binary += " " + sys.argv[1]
1934
      old_cmdline = " ".join(sys.argv[2:])
1935
    else:
1936
      old_cmdline = ""
1937
  else:
1938
    binary = "<unknown program>"
1939
    old_cmdline = ""
1940

    
1941
  if aliases is None:
1942
    aliases = {}
1943

    
1944
  try:
1945
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1946
  except errors.ParameterError, err:
1947
    result, err_msg = FormatError(err)
1948
    ToStderr(err_msg)
1949
    return 1
1950

    
1951
  if func is None: # parse error
1952
    return 1
1953

    
1954
  if override is not None:
1955
    for key, val in override.iteritems():
1956
      setattr(options, key, val)
1957

    
1958
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1959
                     stderr_logging=True)
1960

    
1961
  if old_cmdline:
1962
    logging.info("run with arguments '%s'", old_cmdline)
1963
  else:
1964
    logging.info("run with no arguments")
1965

    
1966
  try:
1967
    result = func(options, args)
1968
  except (errors.GenericError, luxi.ProtocolError,
1969
          JobSubmittedException), err:
1970
    result, err_msg = FormatError(err)
1971
    logging.exception("Error during command processing")
1972
    ToStderr(err_msg)
1973
  except KeyboardInterrupt:
1974
    result = constants.EXIT_FAILURE
1975
    ToStderr("Aborted. Note that if the operation created any jobs, they"
1976
             " might have been submitted and"
1977
             " will continue to run in the background.")
1978

    
1979
  return result
1980

    
1981

    
1982
def ParseNicOption(optvalue):
1983
  """Parses the value of the --net option(s).
1984

1985
  """
1986
  try:
1987
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1988
  except (TypeError, ValueError), err:
1989
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1990

    
1991
  nics = [{}] * nic_max
1992
  for nidx, ndict in optvalue:
1993
    nidx = int(nidx)
1994

    
1995
    if not isinstance(ndict, dict):
1996
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1997
                                 " got %s" % (nidx, ndict))
1998

    
1999
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2000

    
2001
    nics[nidx] = ndict
2002

    
2003
  return nics
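

# Illustrative sketch, not part of the original module: NIC indexes given on
# the command line may leave gaps, which are filled with empty parameter
# dicts (real per-NIC settings would use the keys checked against
# constants.INIC_PARAMS_TYPES above).
def _ExampleParseNicOption():
  nics = ParseNicOption([("0", {}), ("2", {})])
  assert nics == [{}, {}, {}]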
2004

    
2005

    
2006
def GenericInstanceCreate(mode, opts, args):
2007
  """Add an instance to the cluster via either creation or import.
2008

2009
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2010
  @param opts: the command line options selected by the user
2011
  @type args: list
2012
  @param args: should contain only one element, the new instance name
2013
  @rtype: int
2014
  @return: the desired exit code
2015

2016
  """
2017
  instance = args[0]
2018

    
2019
  (pnode, snode) = SplitNodeOption(opts.node)
2020

    
2021
  hypervisor = None
2022
  hvparams = {}
2023
  if opts.hypervisor:
2024
    hypervisor, hvparams = opts.hypervisor
2025

    
2026
  if opts.nics:
2027
    nics = ParseNicOption(opts.nics)
2028
  elif opts.no_nics:
2029
    # no nics
2030
    nics = []
2031
  elif mode == constants.INSTANCE_CREATE:
2032
    # default of one nic, all auto
2033
    nics = [{}]
2034
  else:
2035
    # mode == import
2036
    nics = []
2037

    
2038
  if opts.disk_template == constants.DT_DISKLESS:
2039
    if opts.disks or opts.sd_size is not None:
2040
      raise errors.OpPrereqError("Diskless instance but disk"
2041
                                 " information passed")
2042
    disks = []
2043
  else:
2044
    if (not opts.disks and not opts.sd_size
2045
        and mode == constants.INSTANCE_CREATE):
2046
      raise errors.OpPrereqError("No disk information specified")
2047
    if opts.disks and opts.sd_size is not None:
2048
      raise errors.OpPrereqError("Please use either the '--disk' or"
2049
                                 " '-s' option")
2050
    if opts.sd_size is not None:
2051
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2052

    
2053
    if opts.disks:
2054
      try:
2055
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2056
      except ValueError, err:
2057
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2058
      disks = [{}] * disk_max
2059
    else:
2060
      disks = []
2061
    for didx, ddict in opts.disks:
2062
      didx = int(didx)
2063
      if not isinstance(ddict, dict):
2064
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2065
        raise errors.OpPrereqError(msg)
2066
      elif constants.IDISK_SIZE in ddict:
2067
        if constants.IDISK_ADOPT in ddict:
2068
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2069
                                     " (disk %d)" % didx)
2070
        try:
2071
          ddict[constants.IDISK_SIZE] = \
2072
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2073
        except ValueError, err:
2074
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2075
                                     (didx, err))
2076
      elif constants.IDISK_ADOPT in ddict:
2077
        if mode == constants.INSTANCE_IMPORT:
2078
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2079
                                     " import")
2080
        ddict[constants.IDISK_SIZE] = 0
2081
      else:
2082
        raise errors.OpPrereqError("Missing size or adoption source for"
2083
                                   " disk %d" % didx)
2084
      disks[didx] = ddict
2085

    
2086
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2087
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2088

    
2089
  if mode == constants.INSTANCE_CREATE:
2090
    start = opts.start
2091
    os_type = opts.os
2092
    force_variant = opts.force_variant
2093
    src_node = None
2094
    src_path = None
2095
    no_install = opts.no_install
2096
    identify_defaults = False
2097
  elif mode == constants.INSTANCE_IMPORT:
2098
    start = False
2099
    os_type = None
2100
    force_variant = False
2101
    src_node = opts.src_node
2102
    src_path = opts.src_dir
2103
    no_install = None
2104
    identify_defaults = opts.identify_defaults
2105
  else:
2106
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2107

    
2108
  op = opcodes.OpInstanceCreate(instance_name=instance,
2109
                                disks=disks,
2110
                                disk_template=opts.disk_template,
2111
                                nics=nics,
2112
                                pnode=pnode, snode=snode,
2113
                                ip_check=opts.ip_check,
2114
                                name_check=opts.name_check,
2115
                                wait_for_sync=opts.wait_for_sync,
2116
                                file_storage_dir=opts.file_storage_dir,
2117
                                file_driver=opts.file_driver,
2118
                                iallocator=opts.iallocator,
2119
                                hypervisor=hypervisor,
2120
                                hvparams=hvparams,
2121
                                beparams=opts.beparams,
2122
                                osparams=opts.osparams,
2123
                                mode=mode,
2124
                                start=start,
2125
                                os_type=os_type,
2126
                                force_variant=force_variant,
2127
                                src_node=src_node,
2128
                                src_path=src_path,
2129
                                no_install=no_install,
2130
                                identify_defaults=identify_defaults)
2131

    
2132
  SubmitOrSend(op, opts)
2133
  return 0
2134

    
2135

    
2136
class _RunWhileClusterStoppedHelper:
2137
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2138

2139
  """
2140
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2141
    """Initializes this class.
2142

2143
    @type feedback_fn: callable
2144
    @param feedback_fn: Feedback function
2145
    @type cluster_name: string
2146
    @param cluster_name: Cluster name
2147
    @type master_node: string
2148
    @param master_node: Master node name
2149
    @type online_nodes: list
2150
    @param online_nodes: List of names of online nodes
2151

2152
    """
2153
    self.feedback_fn = feedback_fn
2154
    self.cluster_name = cluster_name
2155
    self.master_node = master_node
2156
    self.online_nodes = online_nodes
2157

    
2158
    self.ssh = ssh.SshRunner(self.cluster_name)
2159

    
2160
    self.nonmaster_nodes = [name for name in online_nodes
2161
                            if name != master_node]
2162

    
2163
    assert self.master_node not in self.nonmaster_nodes
2164

    
2165
  def _RunCmd(self, node_name, cmd):
2166
    """Runs a command on the local or a remote machine.
2167

2168
    @type node_name: string
2169
    @param node_name: Machine name
2170
    @type cmd: list
2171
    @param cmd: Command
2172

2173
    """
2174
    if node_name is None or node_name == self.master_node:
2175
      # No need to use SSH
2176
      result = utils.RunCmd(cmd)
2177
    else:
2178
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2179

    
2180
    if result.failed:
2181
      errmsg = ["Failed to run command %s" % result.cmd]
2182
      if node_name:
2183
        errmsg.append("on node %s" % node_name)
2184
      errmsg.append(": exitcode %s and error %s" %
2185
                    (result.exit_code, result.output))
2186
      raise errors.OpExecError(" ".join(errmsg))
2187

    
2188
  def Call(self, fn, *args):
2189
    """Call function while all daemons are stopped.
2190

2191
    @type fn: callable
2192
    @param fn: Function to be called
2193

2194
    """
2195
    # Pause watcher by acquiring an exclusive lock on watcher state file
2196
    self.feedback_fn("Blocking watcher")
2197
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2198
    try:
2199
      # TODO: Currently, this just blocks. There's no timeout.
2200
      # TODO: Should it be a shared lock?
2201
      watcher_block.Exclusive(blocking=True)
2202

    
2203
      # Stop master daemons, so that no new jobs can come in and all running
2204
      # ones are finished
2205
      self.feedback_fn("Stopping master daemons")
2206
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2207
      try:
2208
        # Stop daemons on all nodes
2209
        for node_name in self.online_nodes:
2210
          self.feedback_fn("Stopping daemons on %s" % node_name)
2211
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2212

    
2213
        # All daemons are shut down now
2214
        try:
2215
          return fn(self, *args)
2216
        except Exception, err:
2217
          _, errmsg = FormatError(err)
2218
          logging.exception("Caught exception")
2219
          self.feedback_fn(errmsg)
2220
          raise
2221
      finally:
2222
        # Start cluster again, master node last
2223
        for node_name in self.nonmaster_nodes + [self.master_node]:
2224
          self.feedback_fn("Starting daemons on %s" % node_name)
2225
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2226
    finally:
2227
      # Resume watcher
2228
      watcher_block.Close()
2229

    
2230

    
2231
def RunWhileClusterStopped(feedback_fn, fn, *args):
2232
  """Calls a function while all cluster daemons are stopped.
2233

2234
  @type feedback_fn: callable
2235
  @param feedback_fn: Feedback function
2236
  @type fn: callable
2237
  @param fn: Function to be called when daemons are stopped
2238

2239
  """
2240
  feedback_fn("Gathering cluster information")
2241

    
2242
  # This ensures we're running on the master daemon
2243
  cl = GetClient()
2244

    
2245
  (cluster_name, master_node) = \
2246
    cl.QueryConfigValues(["cluster_name", "master_node"])
2247

    
2248
  online_nodes = GetOnlineNodes([], cl=cl)
2249

    
2250
  # Don't keep a reference to the client. The master daemon will go away.
2251
  del cl
2252

    
2253
  assert master_node in online_nodes
2254

    
2255
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2256
                                       online_nodes).Call(fn, *args)
2257

    
2258

    
2259
def GenerateTable(headers, fields, separator, data,
2260
                  numfields=None, unitfields=None,
2261
                  units=None):
2262
  """Prints a table with headers and different fields.
2263

2264
  @type headers: dict
2265
  @param headers: dictionary mapping field names to headers for
2266
      the table
2267
  @type fields: list
2268
  @param fields: the field names corresponding to each row in
2269
      the data field
2270
  @param separator: the separator to be used; if this is None,
2271
      the default 'smart' algorithm is used which computes optimal
2272
      field width, otherwise just the separator is used between
2273
      each field
2274
  @type data: list
2275
  @param data: a list of lists, each sublist being one row to be output
2276
  @type numfields: list
2277
  @param numfields: a list with the fields that hold numeric
2278
      values and thus should be right-aligned
2279
  @type unitfields: list
2280
  @param unitfields: a list with the fields that hold numeric
2281
      values that should be formatted with the units field
2282
  @type units: string or None
2283
  @param units: the units we should use for formatting, or None for
2284
      automatic choice (human-readable for non-separator usage, otherwise
2285
      megabytes); this is a one-letter string
2286

2287
  """
2288
  if units is None:
2289
    if separator:
2290
      units = "m"
2291
    else:
2292
      units = "h"
2293

    
2294
  if numfields is None:
2295
    numfields = []
2296
  if unitfields is None:
2297
    unitfields = []
2298

    
2299
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2300
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2301

    
2302
  format_fields = []
2303
  for field in fields:
2304
    if headers and field not in headers:
2305
      # TODO: handle better unknown fields (either revert to old
2306
      # style of raising exception, or deal more intelligently with
2307
      # variable fields)
2308
      headers[field] = field
2309
    if separator is not None:
2310
      format_fields.append("%s")
2311
    elif numfields.Matches(field):
2312
      format_fields.append("%*s")
2313
    else:
2314
      format_fields.append("%-*s")
2315

    
2316
  if separator is None:
2317
    mlens = [0 for name in fields]
2318
    format_str = ' '.join(format_fields)
2319
  else:
2320
    format_str = separator.replace("%", "%%").join(format_fields)
2321

    
2322
  for row in data:
2323
    if row is None:
2324
      continue
2325
    for idx, val in enumerate(row):
2326
      if unitfields.Matches(fields[idx]):
2327
        try:
2328
          val = int(val)
2329
        except (TypeError, ValueError):
2330
          pass
2331
        else:
2332
          val = row[idx] = utils.FormatUnit(val, units)
2333
      val = row[idx] = str(val)
2334
      if separator is None:
2335
        mlens[idx] = max(mlens[idx], len(val))
2336

    
2337
  result = []
2338
  if headers:
2339
    args = []
2340
    for idx, name in enumerate(fields):
2341
      hdr = headers[name]
2342
      if separator is None:
2343
        mlens[idx] = max(mlens[idx], len(hdr))
2344
        args.append(mlens[idx])
2345
      args.append(hdr)
2346
    result.append(format_str % tuple(args))
2347

    
2348
  if separator is None:
2349
    assert len(mlens) == len(fields)
2350

    
2351
    if fields and not numfields.Matches(fields[-1]):
2352
      mlens[-1] = 0
2353

    
2354
  for line in data:
2355
    args = []
2356
    if line is None:
2357
      line = ['-' for _ in fields]
2358
    for idx in range(len(fields)):
2359
      if separator is None:
2360
        args.append(mlens[idx])
2361
      args.append(line[idx])
2362
    result.append(format_str % tuple(args))
2363

    
2364
  return result
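

# Illustrative sketch, not part of the original module: a minimal call to
# GenerateTable; with no separator the "smart" layout pads the columns and
# the unit field is rendered human-readable (e.g. "128M", "4.0G").
def _ExampleGenerateTable():
  headers = {"name": "Instance", "mem": "Memory"}
  data = [["inst1.example.com", 128], ["inst2.example.com", 4096]]
  lines = GenerateTable(headers, ["name", "mem"], None, data,
                        numfields=["mem"], unitfields=["mem"])
  assert len(lines) == 3  # one header line plus one line per data row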
2365

    
2366

    
2367
def _FormatBool(value):
2368
  """Formats a boolean value as a string.
2369

2370
  """
2371
  if value:
2372
    return "Y"
2373
  return "N"
2374

    
2375

    
2376
#: Default formatting for query results; (callback, align right)
2377
_DEFAULT_FORMAT_QUERY = {
2378
  constants.QFT_TEXT: (str, False),
2379
  constants.QFT_BOOL: (_FormatBool, False),
2380
  constants.QFT_NUMBER: (str, True),
2381
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2382
  constants.QFT_OTHER: (str, False),
2383
  constants.QFT_UNKNOWN: (str, False),
2384
  }
2385

    
2386

    
2387
def _GetColumnFormatter(fdef, override, unit):
2388
  """Returns formatting function for a field.
2389

2390
  @type fdef: L{objects.QueryFieldDefinition}
2391
  @type override: dict
2392
  @param override: Dictionary for overriding field formatting functions,
2393
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2394
  @type unit: string
2395
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2396
  @rtype: tuple; (callable, bool)
2397
  @return: Returns the function to format a value (takes one parameter) and a
2398
    boolean for aligning the value on the right-hand side
2399

2400
  """
2401
  fmt = override.get(fdef.name, None)
2402
  if fmt is not None:
2403
    return fmt
2404

    
2405
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2406

    
2407
  if fdef.kind == constants.QFT_UNIT:
2408
    # Can't keep this information in the static dictionary
2409
    return (lambda value: utils.FormatUnit(value, unit), True)
2410

    
2411
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2412
  if fmt is not None:
2413
    return fmt
2414

    
2415
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2416

    
2417

    
2418
class _QueryColumnFormatter:
2419
  """Callable class for formatting fields of a query.
2420

2421
  """
2422
  def __init__(self, fn, status_fn, verbose):
2423
    """Initializes this class.
2424

2425
    @type fn: callable
2426
    @param fn: Formatting function
2427
    @type status_fn: callable
2428
    @param status_fn: Function to report fields' status
2429
    @type verbose: boolean
2430
    @param verbose: whether to use verbose field descriptions or not
2431

2432
    """
2433
    self._fn = fn
2434
    self._status_fn = status_fn
2435
    self._verbose = verbose
2436

    
2437
  def __call__(self, data):
2438
    """Returns a field's string representation.
2439

2440
    """
2441
    (status, value) = data
2442

    
2443
    # Report status
2444
    self._status_fn(status)
2445

    
2446
    if status == constants.RS_NORMAL:
2447
      return self._fn(value)
2448

    
2449
    assert value is None, \
2450
           "Found value %r for abnormal status %s" % (value, status)
2451

    
2452
    return FormatResultError(status, self._verbose)
2453

    
2454

    
2455
def FormatResultError(status, verbose):
2456
  """Formats result status other than L{constants.RS_NORMAL}.
2457

2458
  @param status: The result status
2459
  @type verbose: boolean
2460
  @param verbose: Whether to return the verbose text
2461
  @return: Text of result status
2462

2463
  """
2464
  assert status != constants.RS_NORMAL, \
2465
         "FormatResultError called with status equal to constants.RS_NORMAL"
2466
  try:
2467
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2468
  except KeyError:
2469
    raise NotImplementedError("Unknown status %s" % status)
2470
  else:
2471
    if verbose:
2472
      return verbose_text
2473
    return normal_text
2474

    
2475

    
2476
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2477
                      header=False, verbose=False):
2478
  """Formats data in L{objects.QueryResponse}.
2479

2480
  @type result: L{objects.QueryResponse}
2481
  @param result: result of query operation
2482
  @type unit: string
2483
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2484
    see L{utils.text.FormatUnit}
2485
  @type format_override: dict
2486
  @param format_override: Dictionary for overriding field formatting functions,
2487
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2488
  @type separator: string or None
2489
  @param separator: String used to separate fields
2490
  @type header: bool
2491
  @param header: Whether to output header row
2492
  @type verbose: boolean
2493
  @param verbose: whether to use verbose field descriptions or not
2494

2495
  """
2496
  if unit is None:
2497
    if separator:
2498
      unit = "m"
2499
    else:
2500
      unit = "h"
2501

    
2502
  if format_override is None:
2503
    format_override = {}
2504

    
2505
  stats = dict.fromkeys(constants.RS_ALL, 0)
2506

    
2507
  def _RecordStatus(status):
2508
    if status in stats:
2509
      stats[status] += 1
2510

    
2511
  columns = []
2512
  for fdef in result.fields:
2513
    assert fdef.title and fdef.name
2514
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2515
    columns.append(TableColumn(fdef.title,
2516
                               _QueryColumnFormatter(fn, _RecordStatus,
2517
                                                     verbose),
2518
                               align_right))
2519

    
2520
  table = FormatTable(result.data, columns, header, separator)
2521

    
2522
  # Collect statistics
2523
  assert len(stats) == len(constants.RS_ALL)
2524
  assert compat.all(count >= 0 for count in stats.values())
2525

    
2526
  # Determine overall status. If there was no data, unknown fields must be
2527
  # detected via the field definitions.
2528
  if (stats[constants.RS_UNKNOWN] or
2529
      (not result.data and _GetUnknownFields(result.fields))):
2530
    status = QR_UNKNOWN
2531
  elif compat.any(count > 0 for key, count in stats.items()
2532
                  if key != constants.RS_NORMAL):
2533
    status = QR_INCOMPLETE
2534
  else:
2535
    status = QR_NORMAL
2536

    
2537
  return (status, table)
2538

    
2539

    
2540
def _GetUnknownFields(fdefs):
2541
  """Returns list of unknown fields included in C{fdefs}.
2542

2543
  @type fdefs: list of L{objects.QueryFieldDefinition}
2544

2545
  """
2546
  return [fdef for fdef in fdefs
2547
          if fdef.kind == constants.QFT_UNKNOWN]
2548

    
2549

    
2550
def _WarnUnknownFields(fdefs):
2551
  """Prints a warning to stderr if a query included unknown fields.
2552

2553
  @type fdefs: list of L{objects.QueryFieldDefinition}
2554

2555
  """
2556
  unknown = _GetUnknownFields(fdefs)
2557
  if unknown:
2558
    ToStderr("Warning: Queried for unknown fields %s",
2559
             utils.CommaJoin(fdef.name for fdef in unknown))
2560
    return True
2561

    
2562
  return False
2563

    
2564

    
2565
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2566
                format_override=None, verbose=False):
2567
  """Generic implementation for listing all items of a resource.
2568

2569
  @param resource: One of L{constants.QR_VIA_LUXI}
2570
  @type fields: list of strings
2571
  @param fields: List of fields to query for
2572
  @type names: list of strings
2573
  @param names: Names of items to query for
2574
  @type unit: string or None
2575
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2576
    None for automatic choice (human-readable for non-separator usage,
2577
    otherwise megabytes); this is a one-letter string
2578
  @type separator: string or None
2579
  @param separator: String used to separate fields
2580
  @type header: bool
2581
  @param header: Whether to show header row
2582
  @type format_override: dict
2583
  @param format_override: Dictionary for overriding field formatting functions,
2584
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2585
  @type verbose: boolean
2586
  @param verbose: whether to use verbose field descriptions or not
2587

2588
  """
2589
  if cl is None:
2590
    cl = GetClient()
2591

    
2592
  if not names:
2593
    names = None
2594

    
2595
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2596

    
2597
  found_unknown = _WarnUnknownFields(response.fields)
2598

    
2599
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2600
                                     header=header,
2601
                                     format_override=format_override,
2602
                                     verbose=verbose)
2603

    
2604
  for line in data:
2605
    ToStdout(line)
2606

    
2607
  assert ((found_unknown and status == QR_UNKNOWN) or
2608
          (not found_unknown and status != QR_UNKNOWN))
2609

    
2610
  if status == QR_UNKNOWN:
2611
    return constants.EXIT_UNKNOWN_FIELD
2612

    
2613
  # TODO: Should the list command fail if not all data could be collected?
2614
  return constants.EXIT_SUCCESS
2615

    
2616

    
2617
def GenericListFields(resource, fields, separator, header, cl=None):
2618
  """Generic implementation for listing fields for a resource.
2619

2620
  @param resource: One of L{constants.QR_VIA_LUXI}
2621
  @type fields: list of strings
2622
  @param fields: List of fields to query for
2623
  @type separator: string or None
2624
  @param separator: String used to separate fields
2625
  @type header: bool
2626
  @param header: Whether to show header row
2627

2628
  """
2629
  if cl is None:
2630
    cl = GetClient()
2631

    
2632
  if not fields:
2633
    fields = None
2634

    
2635
  response = cl.QueryFields(resource, fields)
2636

    
2637
  found_unknown = _WarnUnknownFields(response.fields)
2638

    
2639
  columns = [
2640
    TableColumn("Name", str, False),
2641
    TableColumn("Title", str, False),
2642
    TableColumn("Description", str, False),
2643
    ]
2644

    
2645
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2646

    
2647
  for line in FormatTable(rows, columns, header, separator):
2648
    ToStdout(line)
2649

    
2650
  if found_unknown:
2651
    return constants.EXIT_UNKNOWN_FIELD
2652

    
2653
  return constants.EXIT_SUCCESS
2654

    
2655

    
2656
class TableColumn:
2657
  """Describes a column for L{FormatTable}.
2658

2659
  """
2660
  def __init__(self, title, fn, align_right):
2661
    """Initializes this class.
2662

2663
    @type title: string
2664
    @param title: Column title
2665
    @type fn: callable
2666
    @param fn: Formatting function
2667
    @type align_right: bool
2668
    @param align_right: Whether to align values on the right-hand side
2669

2670
    """
2671
    self.title = title
2672
    self.format = fn
2673
    self.align_right = align_right
2674

    
2675

    
2676
def _GetColFormatString(width, align_right):
2677
  """Returns the format string for a field.
2678

2679
  """
2680
  if align_right:
2681
    sign = ""
2682
  else:
2683
    sign = "-"
2684

    
2685
  return "%%%s%ss" % (sign, width)
2686

    
2687

    
2688
def FormatTable(rows, columns, header, separator):
2689
  """Formats data as a table.
2690

2691
  @type rows: list of lists
2692
  @param rows: Row data, one list per row
2693
  @type columns: list of L{TableColumn}
2694
  @param columns: Column descriptions
2695
  @type header: bool
2696
  @param header: Whether to show header row
2697
  @type separator: string or None
2698
  @param separator: String used to separate columns
2699

2700
  """
2701
  if header:
2702
    data = [[col.title for col in columns]]
2703
    colwidth = [len(col.title) for col in columns]
2704
  else:
2705
    data = []
2706
    colwidth = [0 for _ in columns]
2707

    
2708
  # Format row data
2709
  for row in rows:
2710
    assert len(row) == len(columns)
2711

    
2712
    formatted = [col.format(value) for value, col in zip(row, columns)]
2713

    
2714
    if separator is None:
2715
      # Update column widths
2716
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2717
        # Modifying a list's items while iterating is fine
2718
        colwidth[idx] = max(oldwidth, len(value))
2719

    
2720
    data.append(formatted)
2721

    
2722
  if separator is not None:
2723
    # Return early if a separator is used
2724
    return [separator.join(row) for row in data]
2725

    
2726
  if columns and not columns[-1].align_right:
2727
    # Avoid unnecessary spaces at end of line
2728
    colwidth[-1] = 0
2729

    
2730
  # Build format string
2731
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2732
                  for col, width in zip(columns, colwidth)])
2733

    
2734
  return [fmt % tuple(row) for row in data]
2735

    
2736

    
2737
def FormatTimestamp(ts):
2738
  """Formats a given timestamp.
2739

2740
  @type ts: timestamp
2741
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2742

2743
  @rtype: string
2744
  @return: a string with the formatted timestamp
2745

2746
  """
2747
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2748
    return '?'
2749
  sec, usec = ts
2750
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
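

# Illustrative note, not part of the original module: for a well-formed
# timestamp tuple, e.g. FormatTimestamp((1234567890, 42)), the result looks
# like "2009-02-13 23:31:30.000042" (the date/time part depends on the local
# timezone); anything that is not a two-element tuple/list yields "?".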
2751

    
2752

    
2753
def ParseTimespec(value):
2754
  """Parse a time specification.
2755

2756
  The following suffixes will be recognized:
2757

2758
    - s: seconds
2759
    - m: minutes
2760
    - h: hours
2761
    - d: days
2762
    - w: weeks
2763

2764
  Without any suffix, the value will be taken to be in seconds.
2765

2766
  """
2767
  value = str(value)
2768
  if not value:
2769
    raise errors.OpPrereqError("Empty time specification passed")
2770
  suffix_map = {
2771
    's': 1,
2772
    'm': 60,
2773
    'h': 3600,
2774
    'd': 86400,
2775
    'w': 604800,
2776
    }
2777
  if value[-1] not in suffix_map:
2778
    try:
2779
      value = int(value)
2780
    except (TypeError, ValueError):
2781
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2782
  else:
2783
    multiplier = suffix_map[value[-1]]
2784
    value = value[:-1]
2785
    if not value: # no data left after stripping the suffix
2786
      raise errors.OpPrereqError("Invalid time specification (only"
2787
                                 " suffix passed)")
2788
    try:
2789
      value = int(value) * multiplier
2790
    except (TypeError, ValueError):
2791
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2792
  return value
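

# Illustrative sketch, not part of the original module: plain numbers are
# taken as seconds, while the s/m/h/d/w suffixes multiply accordingly.
def _ExampleParseTimespec():
  assert ParseTimespec("30") == 30
  assert ParseTimespec("2h") == 7200
  assert ParseTimespec("1w") == 604800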
2793

    
2794

    
2795
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2796
                   filter_master=False):
2797
  """Returns the names of online nodes.
2798

2799
  This function will also log a warning on stderr with the names of
2800
  the offline nodes that are skipped.
2801

2802
  @param nodes: if not empty, use only this subset of nodes (minus the
2803
      offline ones)
2804
  @param cl: if not None, luxi client to use
2805
  @type nowarn: boolean
2806
  @param nowarn: by default, this function will output a note with the
2807
      offline nodes that are skipped; if this parameter is True the
2808
      note is not displayed
2809
  @type secondary_ips: boolean
2810
  @param secondary_ips: if True, return the secondary IPs instead of the
2811
      names, useful for doing network traffic over the replication interface
2812
      (if any)
2813
  @type filter_master: boolean
2814
  @param filter_master: if True, do not return the master node in the list
2815
      (useful in coordination with secondary_ips where we cannot check our
2816
      node name against the list)
2817

2818
  """
2819
  if cl is None:
2820
    cl = GetClient()
2821

    
2822
  if secondary_ips:
2823
    name_idx = 2
2824
  else:
2825
    name_idx = 0
2826

    
2827
  if filter_master:
2828
    master_node = cl.QueryConfigValues(["master_node"])[0]
2829
    filter_fn = lambda x: x != master_node
2830
  else:
2831
    filter_fn = lambda _: True
2832

    
2833
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2834
                         use_locking=False)
2835
  offline = [row[0] for row in result if row[1]]
2836
  if offline and not nowarn:
2837
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2838
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2839

    
2840

    
2841
def _ToStream(stream, txt, *args):
2842
  """Write a message to a stream, bypassing the logging system
2843

2844
  @type stream: file object
2845
  @param stream: the file to which we should write
2846
  @type txt: str
2847
  @param txt: the message
2848

2849
  """
2850
  if args:
2851
    args = tuple(args)
2852
    stream.write(txt % args)
2853
  else:
2854
    stream.write(txt)
2855
  stream.write('\n')
2856
  stream.flush()
2857

    
2858

    
2859
def ToStdout(txt, *args):
2860
  """Write a message to stdout only, bypassing the logging system
2861

2862
  This is just a wrapper over _ToStream.
2863

2864
  @type txt: str
2865
  @param txt: the message
2866

2867
  """
2868
  _ToStream(sys.stdout, txt, *args)
2869

    
2870

    
2871
def ToStderr(txt, *args):
2872
  """Write a message to stderr only, bypassing the logging system
2873

2874
  This is just a wrapper over _ToStream.
2875

2876
  @type txt: str
2877
  @param txt: the message
2878

2879
  """
2880
  _ToStream(sys.stderr, txt, *args)
2881

    
2882

    
2883
class JobExecutor(object):
2884
  """Class which manages the submission and execution of multiple jobs.
2885

2886
  Note that instances of this class should not be reused between
2887
  GetResults() calls.
2888

2889
  """
2890
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2891
    self.queue = []
2892
    if cl is None:
2893
      cl = GetClient()
2894
    self.cl = cl
2895
    self.verbose = verbose
2896
    self.jobs = []
2897
    self.opts = opts
2898
    self.feedback_fn = feedback_fn
2899

    
2900
  def QueueJob(self, name, *ops):
2901
    """Record a job for later submit.
2902

2903
    @type name: string
2904
    @param name: a description of the job, will be used in WaitJobSet
2905
    """
2906
    SetGenericOpcodeOpts(ops, self.opts)
2907
    self.queue.append((name, ops))
2908

    
2909
  def SubmitPending(self, each=False):
2910
    """Submit all pending jobs.
2911

2912
    """
2913
    if each:
2914
      results = []
2915
      for row in self.queue:
2916
        # SubmitJob will remove the success status, but raise an exception if
2917
        # the submission fails, so we'll notice that anyway.
2918
        results.append([True, self.cl.SubmitJob(row[1])])
2919
    else:
2920
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2921
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2922
                                                            self.queue)):
2923
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
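
# Minimal usage sketch for JobExecutor (illustrative; "opts" and the opcode
# variables below are placeholders, not defined in this module):
#
#   jex = JobExecutor(opts=opts, verbose=True)
#   for name, op in [("inst1", op_for_inst1), ("inst2", op_for_inst2)]:
#     jex.QueueJob(name, op)
#   # GetResults() submits the queued jobs if nothing has been submitted yet,
#   # polls each job and returns [(success, result_or_error), ...] in
#   # queueing order.
#   for success, result in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)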


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the object's own (explicitly set) parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: level of indentation

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
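
# Example (values are made up): with param_dict = {"mem": 512} and
# actual = {"mem": 512, "vcpus": 1}, FormatParameterDict writes
#   - mem: 512
#   - vcpus: default (1)
# into the given StringIO buffer, indented two spaces per level.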


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for performing an
  operation on a given list of items of type list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: extra text inserted into the confirmation message
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
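
# Example (illustrative; the names are made up): asking for confirmation
# before shutting down two nodes could look like
#   if ConfirmOperation(["node1", "node2"], "nodes", "shutdown"):
#     # proceed with the operation
# When more than 20 names are affected, the list is hidden behind an extra
# "view" choice and only shown on request.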