Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 72043dac

History | View | Annotate | Download (117.8 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
import shlex
33
from cStringIO import StringIO
34

    
35
from ganeti import utils
36
from ganeti import errors
37
from ganeti import constants
38
from ganeti import opcodes
39
from ganeti import luxi
40
from ganeti import ssconf
41
from ganeti import rpc
42
from ganeti import ssh
43
from ganeti import compat
44
from ganeti import netutils
45
from ganeti import qlang
46
from ganeti import objects
47
from ganeti import pathutils
48

    
49
from optparse import (OptionParser, TitledHelpFormatter,
50
                      Option, OptionValueError)
51

    
52

    
53
#: Public API of this module: command line option objects, generic CLI
#: helpers, formatting functions, tag helpers and argument definitions.
__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]
267

    
268
#: Prefixes understood by the key=value option parsers: a bare "no_foo" key
#: yields foo=False, a bare "-foo" key yields foo=None (see _SplitKeyVal)
NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
303

    
304

    
305
class _Argument:
306
  def __init__(self, min=0, max=None): # pylint: disable=W0622
307
    self.min = min
308
    self.max = max
309

    
310
  def __repr__(self):
311
    return ("<%s min=%s max=%s>" %
312
            (self.__class__.__name__, self.min, self.max))
313

    
314

    
315
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor; the choices
  are offered as shell-completion suggestions only.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    fmt = "<%s min=%s max=%s choices=%r>"
    return fmt % (self.__class__.__name__, self.min, self.max, self.choices)
329

    
330

    
331
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
338

    
339

    
340
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  Used when the argument's meaning cannot be described statically.

  """
344

    
345

    
346
class ArgInstance(_Argument):
  """Instances argument.

  The value is expected to be an instance name.

  """
350

    
351

    
352
class ArgNode(_Argument):
  """Node argument.

  The value is expected to be a node name.

  """
356

    
357

    
358
class ArgGroup(_Argument):
  """Node group argument.

  The value is expected to be a node group name.

  """
362

    
363

    
364
class ArgJobId(_Argument):
  """Job ID argument.

  The value is expected to be a job identifier.

  """
368

    
369

    
370
class ArgFile(_Argument):
  """File path argument.

  """
374

    
375

    
376
class ArgCommand(_Argument):
  """Command argument.

  Used for arguments that name an executable/command to run.

  """
380

    
381

    
382
class ArgHost(_Argument):
  """Host argument.

  The value is expected to be a hostname.

  """
386

    
387

    
388
class ArgOs(_Argument):
  """OS argument.

  The value is expected to be an OS name.

  """
392

    
393

    
394
ARGS_NONE = []
395
ARGS_MANY_INSTANCES = [ArgInstance()]
396
ARGS_MANY_NODES = [ArgNode()]
397
ARGS_MANY_GROUPS = [ArgGroup()]
398
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
399
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
400
# TODO
401
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
402
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
403

    
404

    
405
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # cluster tags need no object name
    return kind, kind
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # consume the object name from the argument list
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
427

    
428

    
429
def _ExtendTags(opts, args):
430
  """Extend the args if a source file has been given.
431

432
  This function will extend the tags with the contents of the file
433
  passed in the 'tags_source' attribute of the opts parameter. A file
434
  named '-' will be replaced by stdin.
435

436
  """
437
  fname = opts.tags_source
438
  if fname is None:
439
    return
440
  if fname == "-":
441
    new_fh = sys.stdin
442
  else:
443
    new_fh = open(fname, "r")
444
  new_data = []
445
  try:
446
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
447
    # because of python bug 1633941
448
    while True:
449
      line = new_fh.readline()
450
      if not line:
451
        break
452
      new_data.append(line.strip())
453
  finally:
454
    new_fh.close()
455
  args.extend(new_data)
456

    
457

    
458
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  # print the tags in a stable, sorted order
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
474

    
475

    
476
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  # optionally read additional tags from opts.tags_source
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
491

    
492

    
493
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  # optionally read additional tags from opts.tags_source
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
508

    
509

    
510
def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  Parses a size value with an optional unit suffix via
  L{utils.ParseUnit}; parse failures are reported as optparse errors.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
518

    
519

    
520
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if not data:
    return parsed
  for item in utils.UnescapeAndSplit(data, sep=","):
    if "=" in item:
      key, val = item.split("=", 1)
    elif item.startswith(NO_PREFIX):
      # "no_foo" means foo=False
      key, val = item[len(NO_PREFIX):], False
    elif item.startswith(UN_PREFIX):
      # "-foo" means foo=None (reset to default)
      key, val = item[len(UN_PREFIX):], None
    else:
      # bare "foo" means foo=True
      key, val = item, True
    if key in parsed:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    parsed[key] = val
  return parsed
555

    
556

    
557
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ""

  if ident.startswith(NO_PREFIX):
    # "no_ident" removes the whole parameter group; options are invalid
    if rest:
      raise errors.ParameterError("Cannot pass options when removing"
                                  " parameter groups: %s" % value)
    return (ident[len(NO_PREFIX):], False)

  # "-ident" resets the group to defaults, unless the character after the
  # dash is a digit (then it is a negative number, not a removal request)
  if (ident.startswith(UN_PREFIX) and
      (len(ident) <= len(UN_PREFIX) or
       not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      raise errors.ParameterError("Cannot pass options when removing"
                                  " parameter groups: %s" % value)
    return (ident[len(UN_PREFIX):], None)

  return (ident, _SplitKeyVal(opt, rest))
585

    
586

    
587
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.
  See L{_SplitKeyVal} for the handling of empty values.

  """
  return _SplitKeyVal(opt, value)
594

    
595

    
596
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  lowered = value.lower()
  if lowered in (constants.VALUE_FALSE, "no"):
    return False
  if lowered in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % lowered)
609

    
610

    
611
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # explicit empty check needed, since "".split(",") is [""],
  # not an empty list :(
  if value:
    return utils.UnescapeAndSplit(value)
  return []
621

    
622

    
623
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  Returns the literal "default" marker unchanged (lowercased),
  otherwise the value converted to float.

  """
  lowered = value.lower()
  if lowered == constants.VALUE_DEFAULT:
    return lowered
  return float(lowered)
633

    
634

    
635
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: Set of all dynamic-completion markers, used for validation
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
654

    
655

    
656
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with a "completion_suggest"
  attribute and several Ganeti-specific value types.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  # map each custom type to its converter function
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER.update({
    "identkeyval": check_ident_key_val,
    "keyval": check_key_val,
    "unit": check_unit,
    "bool": check_bool,
    "list": check_list,
    "maybefloat": check_maybefloat,
    })
678

    
679

    
680
# optparse.py sets make_option, so we do it for our own option class, too
681
cli_option = CliOption
682

    
683

    
684
_YORNO = "yes|no"
685

    
686
# Generic output/confirmation options shared by many commands
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))
716

    
717
# Tag handling and job submission options
TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
733

    
734
# Dry-run: only perform the pre-execution checks, do not execute.
# Fixed user-visible help text: "verify it it could be" -> "verify if it
# could be".
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
739

    
740
# Verbosity, debugging and disk-sync wait options
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

# NWSYNC_OPT and WFSYNC_OPT share the "wait_for_sync" destination with
# opposite defaults/actions; commands use one or the other
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")
756

    
757
# Instance state, disk-template and storage options
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
797

    
798
# OS selection and parameter options
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])
832

    
833
# Instance specs (min/max/std) and instance-policy options; continuation
# indentation normalized, strings unchanged
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))
877

    
878
# Hypervisor, NIC and disk specification options
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
914

    
915
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                # closing parenthesis was missing in the help
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         # dropped a stray trailing space and added the
                         # missing closing parenthesis
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          # fixed typo in help text: "repetions"
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

# NOTE(review): default=None (not False) with action="store_true" here and in
# NEW_SPICE_CERT_OPT — presumably only truthiness is checked; confirm before
# normalizing to False.
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       # fixed duplicated word "new new"
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)
MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]
class _ShowUsage(Exception):
1484
  """Exception class for L{_ParseArgs}.
1485

1486
  """
1487
  def __init__(self, exit_error):
1488
    """Initializes instances of this class.
1489

1490
    @type exit_error: bool
1491
    @param exit_error: Whether to report failure on exit
1492

1493
    """
1494
    Exception.__init__(self)
1495
    self.exit_error = exit_error
1496

    
1497

    
1498
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when version information should be printed ("--version").

  """
1502

    
1503

    
1504
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # e.g. "gnt-instance list" looks at $GNT_INSTANCE_LIST; its value is
    # split shell-style and inserted right after the command name, so
    # explicit command-line arguments still take precedence
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    # invalid arguments; the error has already been reported to stderr
    return None, None, None

  return func, options, args
1561

    
1562

    
1563
def _FormatUsage(binary, commands):
1564
  """Generates a nice description of all commands.
1565

1566
  @param binary: Script name
1567
  @param commands: Dictionary containing command definitions
1568

1569
  """
1570
  # compute the max line length for cmd + usage
1571
  mlen = min(60, max(map(len, commands)))
1572

    
1573
  yield "Usage: %s {command} [options...] [argument...]" % binary
1574
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1575
  yield ""
1576
  yield "Commands:"
1577

    
1578
  # and format a nice command list
1579
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1580
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1581
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1582
    for line in help_lines:
1583
      yield " %-*s   %s" % (mlen, "", line)
1584

    
1585
  yield ""
1586

    
1587

    
1588
def _CheckArguments(cmd, args_def, args):
1589
  """Verifies the arguments using the argument definition.
1590

1591
  Algorithm:
1592

1593
    1. Abort with error if values specified by user but none expected.
1594

1595
    1. For each argument in definition
1596

1597
      1. Keep running count of minimum number of values (min_count)
1598
      1. Keep running count of maximum number of values (max_count)
1599
      1. If it has an unlimited number of values
1600

1601
        1. Abort with error if it's not the last argument in the definition
1602

1603
    1. If last argument has limited number of values
1604

1605
      1. Abort with error if number of values doesn't match or is too large
1606

1607
    1. Abort with error if user didn't pass enough values (min_count)
1608

1609
  """
1610
  if args and not args_def:
1611
    ToStderr("Error: Command %s expects no arguments", cmd)
1612
    return False
1613

    
1614
  min_count = None
1615
  max_count = None
1616
  check_max = None
1617

    
1618
  last_idx = len(args_def) - 1
1619

    
1620
  for idx, arg in enumerate(args_def):
1621
    if min_count is None:
1622
      min_count = arg.min
1623
    elif arg.min is not None:
1624
      min_count += arg.min
1625

    
1626
    if max_count is None:
1627
      max_count = arg.max
1628
    elif arg.max is not None:
1629
      max_count += arg.max
1630

    
1631
    if idx == last_idx:
1632
      check_max = (arg.max is not None)
1633

    
1634
    elif arg.max is None:
1635
      raise errors.ProgrammerError("Only the last argument can have max=None")
1636

    
1637
  if check_max:
1638
    # Command with exact number of arguments
1639
    if (min_count is not None and max_count is not None and
1640
        min_count == max_count and len(args) != min_count):
1641
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1642
      return False
1643

    
1644
    # Command with limited number of arguments
1645
    if max_count is not None and len(args) > max_count:
1646
      ToStderr("Error: Command %s expects only %d argument(s)",
1647
               cmd, max_count)
1648
      return False
1649

    
1650
  # Command with some required arguments
1651
  if min_count is not None and len(args) < min_count:
1652
    ToStderr("Error: Command %s expects at least %d argument(s)",
1653
             cmd, min_count)
1654
    return False
1655

    
1656
  return True
1657

    
1658

    
1659
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @return: a two-element list C{[primary, secondary]} when the value
      contains a colon, otherwise the tuple C{(value, None)}

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
1667

    
1668

    
1669
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
1684

    
1685

    
1686
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  # a leading "+" means "append to the defaults"
  if not selected.startswith("+"):
    return selected.split(",")

  return default + selected[1:].split(",")
1702

    
1703

    
1704
#: Alias for L{rpc.RunWithRPC}
UsesRPC = rpc.RunWithRPC
1705

    
1706

    
1707
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # without a tty we fall back to the last choice's return value
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # Python 2 "file" builtin; "a+" so we can both prompt and read
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # the last choice is the default, shown in brackets
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # read at most two characters — presumably the answer character
      # plus a newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1762

    
1763

    
1764
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
1773

    
1774

    
1775
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the submitted job

  """
  client = cl if cl is not None else GetClient()
  return client.SubmitJob(ops)
1791

    
1792

    
1793
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the opcode results when the job succeeded
  @raise errors.JobLost: if the job cannot be found anymore
  @raise errors.OpExecError: if the job was canceled or failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # max() with None works under Python 2 (None sorts before
        # numbers), so the first serial simply replaces the None
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: the elif means a terminal status only breaks the loop on an
    # iteration that delivered no new log entries, so pending log
    # messages are always reported before we stop polling
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # the job failed: find the first failed opcode and raise accordingly
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        # some earlier opcodes succeeded, so report a partial failure
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1869

    
1870

    
1871
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-retrieval side of job polling; see
  L{_LuxiJobPollCb} for the luxi-based implementation.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields
    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()
1897

    
1898

    
1899
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how job progress is presented; see
  L{FeedbackFnJobPollReportCb} and L{StdioJobPollReportCb}.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available
    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()
1924

    
1925

    
1926
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling callbacks backed by a luxi client."""

  def __init__(self, cl):
    """Remembers the luxi client all calls are delegated to.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    client = self.cl
    return client.WaitForJobChangeOnce(job_id, fields, prev_job_info,
                                       prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    client = self.cl
    return client.QueryJobs(job_ids, fields)
1947

    
1948

    
1949
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward every log message to a feedback
  function; "not changed" notifications are ignored.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with one (timestamp, log_type,
        log_msg) tuple per job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
1971

    
1972

    
1973
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print job progress to stdout/stderr."""

  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # one-time notification flags, so each notice is printed only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a timestamped job log message on standard output.

    """
    when = time.ctime(utils.MergeTime(timestamp))
    ToStdout("%s %s", when, FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Prints a one-time notice for jobs stuck in the queue or on locks.

    """
    if status is None:
      return

    if not self.notified_queued and status == constants.JOB_STATUS_QUEUED:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif (not self.notified_waitlock and
          status == constants.JOB_STATUS_WAITING):
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
2004

    
2005

    
2006
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    # non-message payloads can be arbitrary objects; stringify them first
    text = str(log_msg)

  return utils.SafeEncode(text)
2014

    
2015

    
2016
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: feedback function, mutually exclusive with C{reporter}
  @param reporter: L{JobPollReportCbBase} instance; if neither is given,
      a L{StdioJobPollReportCb} is used

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2038

    
2039

    
2040
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client, created on demand when None
  @param opts: command line options used to annotate the opcode
  @return: the result of the single submitted opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  results = PollJob(SendJob([op], cl=cl), cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)

  return results[0]
2059

    
2060

    
2061
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: when '--submit' was given; carries the
      job ID as its only argument

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
2080

    
2081

    
2082
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      opcode.priority = _PRIONAME_TO_VALUE[priority]
2101

    
2102

    
2103
def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket
  @raise errors.OpPrereqError: if the cluster is not initialized or
      this node is not the master

  """
  if query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    # None makes luxi.Client use its default (masterd) socket
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    # we are on the master but could not connect: re-raise the original
    # NoMasterError for the caller to handle
    raise
  return client
2138

    
2139

    
2140
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception instance to describe
  @rtype: tuple
  @return: (exit code, error description)

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: the isinstance checks for specific error classes are placed
  # before the errors.GenericError catch-all, so ordering matters here
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the job was submitted, only the ID has to be printed
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2226

    
2227

    
2228
def GenericMain(commands, override=None, aliases=None,
2229
                env_override=frozenset()):
2230
  """Generic main function for all the gnt-* commands.
2231

2232
  @param commands: a dictionary with a special structure, see the design doc
2233
                   for command line handling.
2234
  @param override: if not None, we expect a dictionary with keys that will
2235
                   override command line options; this can be used to pass
2236
                   options from the scripts to generic functions
2237
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2238
  @param env_override: list of environment names which are allowed to submit
2239
                       default args for commands
2240

2241
  """
2242
  # save the program name and the entire command line for later logging
2243
  if sys.argv:
2244
    binary = os.path.basename(sys.argv[0])
2245
    if not binary:
2246
      binary = sys.argv[0]
2247

    
2248
    if len(sys.argv) >= 2:
2249
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2250
    else:
2251
      logname = binary
2252

    
2253
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2254
  else:
2255
    binary = "<unknown program>"
2256
    cmdline = "<unknown>"
2257

    
2258
  if aliases is None:
2259
    aliases = {}
2260

    
2261
  try:
2262
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2263
                                       env_override)
2264
  except _ShowVersion:
2265
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2266
             constants.RELEASE_VERSION)
2267
    return constants.EXIT_SUCCESS
2268
  except _ShowUsage, err:
2269
    for line in _FormatUsage(binary, commands):
2270
      ToStdout(line)
2271

    
2272
    if err.exit_error:
2273
      return constants.EXIT_FAILURE
2274
    else:
2275
      return constants.EXIT_SUCCESS
2276
  except errors.ParameterError, err:
2277
    result, err_msg = FormatError(err)
2278
    ToStderr(err_msg)
2279
    return 1
2280

    
2281
  if func is None: # parse error
2282
    return 1
2283

    
2284
  if override is not None:
2285
    for key, val in override.iteritems():
2286
      setattr(options, key, val)
2287

    
2288
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2289
                     stderr_logging=True)
2290

    
2291
  logging.info("Command line: %s", cmdline)
2292

    
2293
  try:
2294
    result = func(options, args)
2295
  except (errors.GenericError, luxi.ProtocolError,
2296
          JobSubmittedException), err:
2297
    result, err_msg = FormatError(err)
2298
    logging.exception("Error during command processing")
2299
    ToStderr(err_msg)
2300
  except KeyboardInterrupt:
2301
    result = constants.EXIT_FAILURE
2302
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2303
             " might have been submitted and"
2304
             " will continue to run in the background.")
2305
  except IOError, err:
2306
    if err.errno == errno.EPIPE:
2307
      # our terminal went away, we'll exit
2308
      sys.exit(constants.EXIT_FAILURE)
2309
    else:
2310
      raise
2311

    
2312
  return result
2313

    
2314

    
2315
def ParseNicOption(optvalue):
2316
  """Parses the value of the --net option(s).
2317

2318
  """
2319
  try:
2320
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2321
  except (TypeError, ValueError), err:
2322
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2323
                               errors.ECODE_INVAL)
2324

    
2325
  nics = [{}] * nic_max
2326
  for nidx, ndict in optvalue:
2327
    nidx = int(nidx)
2328

    
2329
    if not isinstance(ndict, dict):
2330
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2331
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2332

    
2333
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2334

    
2335
    nics[nidx] = ndict
2336

    
2337
  return nics
2338

    
2339

    
2340
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code
  @raise errors.OpPrereqError: for invalid disk or NIC specifications
  @raise errors.ProgrammerError: if C{mode} is neither create nor import

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      # Translate the single-disk shortcut ("-s") into the generic form
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # Build independent dicts; "[{}] * disk_max" would alias one shared
      # dict across every unspecified disk slot
      disks = [{} for _ in range(disk_max)]
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        # Adopted volumes carry no explicit size here
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific opcode parameters
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0

class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; the master is handled separately
    # in L{Call} (commands run locally, daemons restarted last)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; C{None} or the master node's name means
        the command is executed locally instead of via SSH
    @type cmd: list
    @param cmd: Command and its arguments
    @raise errors.OpExecError: if the command exits with a failure

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called; it receives this helper instance as
        its first argument, followed by C{args}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # Log and report the error, then re-raise; the "finally" clauses
          # below still restart the daemons and release the watcher lock
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()

def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Runs a callable with every cluster daemon shut down.

  @type feedback_fn: callable
  @param feedback_fn: Function used for progress reporting
  @type fn: callable
  @param fn: Callable invoked once all daemons have been stopped
  @return: whatever C{fn} returns

  """
  feedback_fn("Gathering cluster information")

  # Talking to luxi here also guarantees we're running on the master daemon
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])
  online_nodes = GetOnlineNodes([], cl=client)

  # The master daemon is about to be stopped, so drop the client reference
  del client

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)

def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output;
      note that rows are modified in place (values are converted to
      strings, unit fields are formatted)
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Choose a %-format specifier per field; "%*s" consumes the column width
  # as an extra argument, supplied from "mlens" when rendering below
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # Maximum observed width per column, updated while scanning the data
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape "%" in the separator so it survives %-formatting
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify all values in place and record column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # non-numeric unit field values are kept as-is
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers may be wider than any data value
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Avoid padding the last column unless it is right-aligned
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: render the rows; None rows become all-dash placeholders
  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result

def _FormatBool(value):
  """Renders a truth value as a one-letter string.

  @param value: value to be interpreted as a boolean
  @rtype: string
  @return: "Y" for truthy values, "N" otherwise

  """
  return "Y" if value else "N"

#: Default formatting for query results; maps a field type to a tuple of
#: (formatting callback, align right).  L{constants.QFT_UNIT} is deliberately
#: absent: its formatting depends on the requested unit and is therefore
#: handled dynamically in L{_GetColumnFormatter}.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),  # numbers are right-aligned
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }

def _GetColumnFormatter(fdef, override, unit):
  """Determines how a query column should be rendered.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Per-field formatting overrides, indexed by field name,
    with entries structured like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: formatting function taking one parameter, and whether values
    should be aligned on the right-hand side
  @raise NotImplementedError: for field types without a known formatter

  """
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Unit formatting depends on the requested unit, so it can't live in
    # the static default dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  default = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if default is None:
    raise NotImplementedError("Can't format column type '%s'" % fdef.kind)

  return default

class _QueryColumnFormatter:
  """Callable wrapper turning one query field value into text.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function applied to values with normal status
    @type status_fn: callable
    @param status_fn: Callback invoked with each field's result status
    @type verbose: boolean
    @param verbose: whether to use verbose descriptions for abnormal
      statuses

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Let the caller keep statistics about statuses
    self._status_fn(status)

    if status != constants.RS_NORMAL:
      # Abnormal statuses never carry a payload
      assert value is None, \
             "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._fn(value)

def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @rtype: string
  @return: Text of result status
  @raise NotImplementedError: for statuses without a description

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  if status not in constants.RSS_DESCRIPTION:
    raise NotImplementedError("Unknown status %s" % status)

  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  if verbose:
    return verbose_text
  return normal_text

def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple
  @return: overall result status and the formatted table lines

  """
  if unit is None:
    # Human-readable units by default, megabytes for separator output
    unit = "m" if separator else "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, updated by the column formatters as a side effect
  # while the table is being built
  counters = dict.fromkeys(constants.RS_ALL, 0)

  def _CountStatus(status):
    if status in counters:
      counters[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (format_fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    cell_fn = _QueryColumnFormatter(format_fn, _CountStatus, verbose)
    columns.append(TableColumn(fdef.title, cell_fn, align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Sanity-check the collected statistics
  assert len(counters) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in counters.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (counters[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in counters.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)

def _GetUnknownFields(fdefs):
  """Filters field definitions down to those of unknown type.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: the elements of C{fdefs} whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown_kind = constants.QFT_UNKNOWN
  return [fdef for fdef in fdefs if fdef.kind == unknown_kind]

def _WarnUnknownFields(fdefs):
  """Emits a warning on stderr when unknown fields were queried.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was found

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True

def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  # An empty name list means "no name filtering"
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  # Combine the name filter with any caller-supplied filter
  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  # Unknown fields must always be reflected in the overall status
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS

def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [TableColumn(title, str, False)
             for title in ("Name", "Title", "Description")]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS

class TableColumn:
  """Description of a single column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title shown in the header row
    @type fn: callable
    @param fn: Function used to format the column's values
    @type align_right: bool
    @param align_right: Whether values are aligned on the right-hand side

    """
    self.align_right = align_right
    self.format = fn
    self.title = title

def _GetColFormatString(width, align_right):
  """Builds the %-format specifier for one table column.

  @type width: int
  @param width: Minimum field width
  @type align_right: bool
  @param align_right: Whether the value is aligned on the right-hand side
  @rtype: string
  @return: a format string such as C{"%10s"} or C{"%-10s"}

  """
  sign = "" if align_right else "-"
  return "%%%s%ss" % (sign, width)

def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the rendered table lines

  """
  if header:
    lines = [[col.title for col in columns]]
    widths = [len(col.title) for col in columns]
  else:
    lines = []
    widths = [0 for _ in columns]

  # Render each row through the columns' formatting functions
  for row in rows:
    assert len(row) == len(columns)

    rendered = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Track the widest value seen per column
      widths = [max(cur, len(value))
                for cur, value in zip(widths, rendered)]

    lines.append(rendered)

  if separator is not None:
    # With an explicit separator no padding is necessary
    return [separator.join(line) for line in lines]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    widths[-1] = 0

  # Build one combined format string covering all columns
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for col, width in zip(columns, widths))

  return [fmt % tuple(line) for line in lines]

def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)

  # Anything but a two-element sequence cannot be formatted
  return "?"

def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds described by C{value}
  @raise errors.OpPrereqError: for empty or unparseable specifications

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] in suffix_map:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      return int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)

  # No suffix: the value is a plain number of seconds
  try:
    return int(value)
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                               errors.ECODE_INVAL)

def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that were skipped (unless C{nowarn} is set).

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  # Filter clauses; multiple clauses are AND-ed together below
  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # The node group can be given either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each result row holds one (status, value) pair per requested field, in
  # the order requested above: name, offline, sip

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)

def _ToStream(stream, txt, *args):
3242
  """Write a message to a stream, bypassing the logging system
3243

3244
  @type stream: file object
3245
  @param stream: the file to which we should write
3246
  @type txt: str
3247
  @param txt: the message
3248

3249
  """
3250
  try:
3251
    if args:
3252
      args = tuple(args)
3253
      stream.write(txt % args)
3254
    else:
3255
      stream.write(txt)
3256
    stream.write("\n")
3257
    stream.flush()
3258
  except IOError, err:
3259
    if err.errno == errno.EPIPE:
3260
      # our terminal went away, we'll exit
3261
      sys.exit(constants.EXIT_FAILURE)
3262
    else:
3263
      raise
3264

    
3265

    
3266
def ToStdout(txt, *args):
  """Print a message on standard output, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message (a %-format string if args are given)

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Print a message on standard error, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message (a %-format string if args are given)

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # queue of (index, name, ops) tuples, filled by QueueJob and consumed
    # by SubmitPending
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # submitted jobs: (index, submit-success, job-id-or-error, name)
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # monotonically increasing index used to restore submission order in
    # GetResults()
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    Returns C{fmt % name} if name is non-empty, the empty string otherwise.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    # apply generic options (priority, dry-run, ...) to all opcodes
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit jobs one by one via SubmitJob instead of
        a single SubmitManyJobs call

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    # pair each submission result with its queued entry (same order)
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    Queries the status of at most _CHOOSE_BATCH jobs and returns (and
    removes from self.jobs) the first one that is no longer pending.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; fall back to polling the oldest submitted job
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not
    @return: job results (as from GetResults) if waiting, otherwise a list
        of (status, job-id-or-error) tuples

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary into a buffer.

  Keys are taken (sorted) from the effective parameter set; a value not
  explicitly present in the own parameters is shown as "default (...)".

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  prefix = "  " * level
  for key, value in sorted(actual.items()):
    buf.write("%s- %s:" % (prefix, key))
    if value and isinstance(value, dict):
      # non-empty sub-dictionary: recurse one indentation level deeper
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), value, level=level + 1)
    else:
      # scalar (or empty dict): own value if set, otherwise mark the default
      buf.write(" %s\n" % param_dict.get(key, "default (%s)" % value))
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  listing = "\n".join("  %s" % name for name in names)
  affected = ("\nAffected %s:\n" % list_type) + listing

  choices = [
    ("y", True, "Yes, execute the %s" % text),
    ("n", False, "No, abort the %s" % text),
    ]

  if count > 20:
    # for long lists, hide the listing behind an extra "view" choice
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  answer = AskUser(question, choices)
  if answer == "v":
    # user wanted to see the list first; show it and ask again without "v"
    choices.pop(1)
    answer = AskUser(msg + affected, choices)
  return answer
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to the special "default" marker are passed through
  unmodified; everything else is run through L{utils.ParseUnit}.

  """
  def _ParseOne(value):
    if value == constants.VALUE_DEFAULT:
      return value
    return utils.ParseUnit(value)

  return dict((key, _ParseOne(value))
              for (key, value) in elements.items())
3521
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The ispecs_* arguments are dicts keyed by min/max/std; they are
  transposed into the per-key layout of the returned ipolicy dict.
  NOTE(review): unset ispecs_* arguments stay None and are passed as-is to
  utils.ForceDictType and iterated below — presumably callers always
  provide (possibly empty) dicts; confirm against callers.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled


  """
  try:
    # memory and disk sizes may carry units ("512M"); parse them to MiB
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ipolicy_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES

  for specs in ipolicy_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose: from {ispec-name: {min/max/std: val}} to
  # {min/max/std: {ispec-name: val}}
  ipolicy_out = objects.MakeEmptyIPolicy()
  for name, specs in ipolicy_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      ipolicy_out[key][name] = val

  # no filldict for non-dicts
  if not group_ipolicy and fill_all:
    # cluster-level policy: fall back to built-in defaults for any
    # non-dict value that was not given on the command line
    if ipolicy_disk_templates is None:
      ipolicy_disk_templates = constants.DISK_TEMPLATES
    if ipolicy_vcpu_ratio is None:
      ipolicy_vcpu_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
    if ipolicy_spindle_ratio is None:
      ipolicy_spindle_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  return ipolicy_out