1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
import shlex
33
from cStringIO import StringIO
34

    
35
from ganeti import utils
36
from ganeti import errors
37
from ganeti import constants
38
from ganeti import opcodes
39
from ganeti import luxi
40
from ganeti import ssconf
41
from ganeti import rpc
42
from ganeti import ssh
43
from ganeti import compat
44
from ganeti import netutils
45
from ganeti import qlang
46
from ganeti import objects
47
from ganeti import pathutils
48

    
49
from optparse import (OptionParser, TitledHelpFormatter,
50
                      Option, OptionValueError)
51

    
52

    
53
__all__ = [
54
  # Command line options
55
  "ABSOLUTE_OPT",
56
  "ADD_UIDS_OPT",
57
  "ALLOCATABLE_OPT",
58
  "ALLOC_POLICY_OPT",
59
  "ALL_OPT",
60
  "ALLOW_FAILOVER_OPT",
61
  "AUTO_PROMOTE_OPT",
62
  "AUTO_REPLACE_OPT",
63
  "BACKEND_OPT",
64
  "BLK_OS_OPT",
65
  "CAPAB_MASTER_OPT",
66
  "CAPAB_VM_OPT",
67
  "CLEANUP_OPT",
68
  "CLUSTER_DOMAIN_SECRET_OPT",
69
  "CONFIRM_OPT",
70
  "CP_SIZE_OPT",
71
  "DEBUG_OPT",
72
  "DEBUG_SIMERR_OPT",
73
  "DISKIDX_OPT",
74
  "DISK_OPT",
75
  "DISK_PARAMS_OPT",
76
  "DISK_TEMPLATE_OPT",
77
  "DRAINED_OPT",
78
  "DRY_RUN_OPT",
79
  "DRBD_HELPER_OPT",
80
  "DST_NODE_OPT",
81
  "EARLY_RELEASE_OPT",
82
  "ENABLED_HV_OPT",
83
  "ERROR_CODES_OPT",
84
  "FIELDS_OPT",
85
  "FILESTORE_DIR_OPT",
86
  "FILESTORE_DRIVER_OPT",
87
  "FORCE_FILTER_OPT",
88
  "FORCE_OPT",
89
  "FORCE_VARIANT_OPT",
90
  "GLOBAL_FILEDIR_OPT",
91
  "HID_OS_OPT",
92
  "GLOBAL_SHARED_FILEDIR_OPT",
93
  "HVLIST_OPT",
94
  "HVOPTS_OPT",
95
  "HYPERVISOR_OPT",
96
  "IALLOCATOR_OPT",
97
  "DEFAULT_IALLOCATOR_OPT",
98
  "IDENTIFY_DEFAULTS_OPT",
99
  "IGNORE_CONSIST_OPT",
100
  "IGNORE_ERRORS_OPT",
101
  "IGNORE_FAILURES_OPT",
102
  "IGNORE_OFFLINE_OPT",
103
  "IGNORE_REMOVE_FAILURES_OPT",
104
  "IGNORE_SECONDARIES_OPT",
105
  "IGNORE_SIZE_OPT",
106
  "INTERVAL_OPT",
107
  "MAC_PREFIX_OPT",
108
  "MAINTAIN_NODE_HEALTH_OPT",
109
  "MASTER_NETDEV_OPT",
110
  "MASTER_NETMASK_OPT",
111
  "MC_OPT",
112
  "MIGRATION_MODE_OPT",
113
  "NET_OPT",
114
  "NEW_CLUSTER_CERT_OPT",
115
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
116
  "NEW_CONFD_HMAC_KEY_OPT",
117
  "NEW_RAPI_CERT_OPT",
118
  "NEW_SECONDARY_OPT",
119
  "NEW_SPICE_CERT_OPT",
120
  "NIC_PARAMS_OPT",
121
  "NODE_FORCE_JOIN_OPT",
122
  "NODE_LIST_OPT",
123
  "NODE_PLACEMENT_OPT",
124
  "NODEGROUP_OPT",
125
  "NODE_PARAMS_OPT",
126
  "NODE_POWERED_OPT",
127
  "NODRBD_STORAGE_OPT",
128
  "NOHDR_OPT",
129
  "NOIPCHECK_OPT",
130
  "NO_INSTALL_OPT",
131
  "NONAMECHECK_OPT",
132
  "NOLVM_STORAGE_OPT",
133
  "NOMODIFY_ETCHOSTS_OPT",
134
  "NOMODIFY_SSH_SETUP_OPT",
135
  "NONICS_OPT",
136
  "NONLIVE_OPT",
137
  "NONPLUS1_OPT",
138
  "NORUNTIME_CHGS_OPT",
139
  "NOSHUTDOWN_OPT",
140
  "NOSTART_OPT",
141
  "NOSSH_KEYCHECK_OPT",
142
  "NOVOTING_OPT",
143
  "NO_REMEMBER_OPT",
144
  "NWSYNC_OPT",
145
  "OFFLINE_INST_OPT",
146
  "ONLINE_INST_OPT",
147
  "ON_PRIMARY_OPT",
148
  "ON_SECONDARY_OPT",
149
  "OFFLINE_OPT",
150
  "OSPARAMS_OPT",
151
  "OS_OPT",
152
  "OS_SIZE_OPT",
153
  "OOB_TIMEOUT_OPT",
154
  "POWER_DELAY_OPT",
155
  "PREALLOC_WIPE_DISKS_OPT",
156
  "PRIMARY_IP_VERSION_OPT",
157
  "PRIMARY_ONLY_OPT",
158
  "PRIORITY_OPT",
159
  "RAPI_CERT_OPT",
160
  "READD_OPT",
161
  "REBOOT_TYPE_OPT",
162
  "REMOVE_INSTANCE_OPT",
163
  "REMOVE_UIDS_OPT",
164
  "RESERVED_LVS_OPT",
165
  "RUNTIME_MEM_OPT",
166
  "ROMAN_OPT",
167
  "SECONDARY_IP_OPT",
168
  "SECONDARY_ONLY_OPT",
169
  "SELECT_OS_OPT",
170
  "SEP_OPT",
171
  "SHOWCMD_OPT",
172
  "SHUTDOWN_TIMEOUT_OPT",
173
  "SINGLE_NODE_OPT",
174
  "SPECS_CPU_COUNT_OPT",
175
  "SPECS_DISK_COUNT_OPT",
176
  "SPECS_DISK_SIZE_OPT",
177
  "SPECS_MEM_SIZE_OPT",
178
  "SPECS_NIC_COUNT_OPT",
179
  "IPOLICY_DISK_TEMPLATES",
180
  "IPOLICY_VCPU_RATIO",
181
  "SPICE_CACERT_OPT",
182
  "SPICE_CERT_OPT",
183
  "SRC_DIR_OPT",
184
  "SRC_NODE_OPT",
185
  "SUBMIT_OPT",
186
  "STARTUP_PAUSED_OPT",
187
  "STATIC_OPT",
188
  "SYNC_OPT",
189
  "TAG_ADD_OPT",
190
  "TAG_SRC_OPT",
191
  "TIMEOUT_OPT",
192
  "TO_GROUP_OPT",
193
  "UIDPOOL_OPT",
194
  "USEUNITS_OPT",
195
  "USE_EXTERNAL_MIP_SCRIPT",
196
  "USE_REPL_NET_OPT",
197
  "VERBOSE_OPT",
198
  "VG_NAME_OPT",
199
  "WFSYNC_OPT",
200
  "YES_DOIT_OPT",
201
  "DISK_STATE_OPT",
202
  "HV_STATE_OPT",
203
  "IGNORE_IPOLICY_OPT",
204
  "INSTANCE_POLICY_OPTS",
205
  # Generic functions for CLI programs
206
  "ConfirmOperation",
207
  "CreateIPolicyFromOpts",
208
  "GenericMain",
209
  "GenericInstanceCreate",
210
  "GenericList",
211
  "GenericListFields",
212
  "GetClient",
213
  "GetOnlineNodes",
214
  "JobExecutor",
215
  "JobSubmittedException",
216
  "ParseTimespec",
217
  "RunWhileClusterStopped",
218
  "SubmitOpCode",
219
  "SubmitOrSend",
220
  "UsesRPC",
221
  # Formatting functions
222
  "ToStderr", "ToStdout",
223
  "FormatError",
224
  "FormatQueryResult",
225
  "FormatParameterDict",
226
  "GenerateTable",
227
  "AskUser",
228
  "FormatTimestamp",
229
  "FormatLogMessage",
230
  # Tags functions
231
  "ListTags",
232
  "AddTags",
233
  "RemoveTags",
234
  # command line options support infrastructure
235
  "ARGS_MANY_INSTANCES",
236
  "ARGS_MANY_NODES",
237
  "ARGS_MANY_GROUPS",
238
  "ARGS_NONE",
239
  "ARGS_ONE_INSTANCE",
240
  "ARGS_ONE_NODE",
241
  "ARGS_ONE_GROUP",
242
  "ARGS_ONE_OS",
243
  "ArgChoice",
244
  "ArgCommand",
245
  "ArgFile",
246
  "ArgGroup",
247
  "ArgHost",
248
  "ArgInstance",
249
  "ArgJobId",
250
  "ArgNode",
251
  "ArgOs",
252
  "ArgSuggest",
253
  "ArgUnknown",
254
  "OPT_COMPL_INST_ADD_NODES",
255
  "OPT_COMPL_MANY_NODES",
256
  "OPT_COMPL_ONE_IALLOCATOR",
257
  "OPT_COMPL_ONE_INSTANCE",
258
  "OPT_COMPL_ONE_NODE",
259
  "OPT_COMPL_ONE_NODEGROUP",
260
  "OPT_COMPL_ONE_OS",
261
  "cli_option",
262
  "SplitNodeOption",
263
  "CalculateOSNames",
264
  "ParseFields",
265
  "COMMON_CREATE_OPTS",
266
  ]
267

    
268
NO_PREFIX = "no_"
269
UN_PREFIX = "-"
270

    
271
#: Priorities (sorted)
272
_PRIORITY_NAMES = [
273
  ("low", constants.OP_PRIO_LOW),
274
  ("normal", constants.OP_PRIO_NORMAL),
275
  ("high", constants.OP_PRIO_HIGH),
276
  ]
277

    
278
#: Priority dictionary for easier lookup
279
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
280
# we migrate to Python 2.6
281
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
282

    
283
# Query result status for clients
284
(QR_NORMAL,
285
 QR_UNKNOWN,
286
 QR_INCOMPLETE) = range(3)
287

    
288
#: Maximum batch size for ChooseJob
289
_CHOOSE_BATCH = 25
290

    
291

    
292
# constants used to create InstancePolicy dictionary
293
TISPECS_GROUP_TYPES = {
294
  constants.ISPECS_MIN: constants.VTYPE_INT,
295
  constants.ISPECS_MAX: constants.VTYPE_INT,
296
  }
297

    
298
TISPECS_CLUSTER_TYPES = {
299
  constants.ISPECS_MIN: constants.VTYPE_INT,
300
  constants.ISPECS_MAX: constants.VTYPE_INT,
301
  constants.ISPECS_STD: constants.VTYPE_INT,
302
  }
303

    
304

    
305
class _Argument:
306
  def __init__(self, min=0, max=None): # pylint: disable=W0622
307
    self.min = min
308
    self.max = max
309

    
310
  def __repr__(self):
311
    return ("<%s min=%s max=%s>" %
312
            (self.__class__.__name__, self.min, self.max))
313

    
314

    
315
class ArgSuggest(_Argument):
316
  """Suggesting argument.
317

318
  Value can be any of the ones passed to the constructor.
319

320
  """
321
  # pylint: disable=W0622
322
  def __init__(self, min=0, max=None, choices=None):
323
    _Argument.__init__(self, min=min, max=max)
324
    self.choices = choices
325

    
326
  def __repr__(self):
327
    return ("<%s min=%s max=%s choices=%r>" %
328
            (self.__class__.__name__, self.min, self.max, self.choices))
329

    
330

    
331
class ArgChoice(ArgSuggest):
332
  """Choice argument.
333

334
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
335
  but value must be one of the choices.
336

337
  """
338

    
339

    
340
class ArgUnknown(_Argument):
341
  """Unknown argument to program (e.g. determined at runtime).
342

343
  """
344

    
345

    
346
class ArgInstance(_Argument):
347
  """Instances argument.
348

349
  """
350

    
351

    
352
class ArgNode(_Argument):
353
  """Node argument.
354

355
  """
356

    
357

    
358
class ArgGroup(_Argument):
359
  """Node group argument.
360

361
  """
362

    
363

    
364
class ArgJobId(_Argument):
365
  """Job ID argument.
366

367
  """
368

    
369

    
370
class ArgFile(_Argument):
371
  """File path argument.
372

373
  """
374

    
375

    
376
class ArgCommand(_Argument):
377
  """Command argument.
378

379
  """
380

    
381

    
382
class ArgHost(_Argument):
383
  """Host argument.
384

385
  """
386

    
387

    
388
class ArgOs(_Argument):
389
  """OS argument.
390

391
  """
392

    
393

    
394
ARGS_NONE = []
395
ARGS_MANY_INSTANCES = [ArgInstance()]
396
ARGS_MANY_NODES = [ArgNode()]
397
ARGS_MANY_GROUPS = [ArgGroup()]
398
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
399
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
400
# TODO
401
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
402
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
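# Illustrative (hypothetical) example: the _Argument classes above only carry
# arity information, so a command taking exactly one instance followed by one
# or more free-form values could describe its positional arguments as
#   [ArgInstance(min=1, max=1), ArgUnknown(min=1)]
# in the same way the ARGS_* shortcuts above are built.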
403

    
404

    
405
def _ExtractTagsObject(opts, args):
406
  """Extract the tag type object.
407

408
  Note that this function will modify its args parameter.
409

410
  """
411
  if not hasattr(opts, "tag_type"):
412
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
413
  kind = opts.tag_type
414
  if kind == constants.TAG_CLUSTER:
415
    retval = kind, None
416
  elif kind in (constants.TAG_NODEGROUP,
417
                constants.TAG_NODE,
418
                constants.TAG_INSTANCE):
419
    if not args:
420
      raise errors.OpPrereqError("no arguments passed to the command",
421
                                 errors.ECODE_INVAL)
422
    name = args.pop(0)
423
    retval = kind, name
424
  else:
425
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
426
  return retval
427

    
428

    
429
def _ExtendTags(opts, args):
430
  """Extend the args if a source file has been given.
431

432
  This function will extend the tags with the contents of the file
433
  passed in the 'tags_source' attribute of the opts parameter. A file
434
  named '-' will be replaced by stdin.
435

436
  """
437
  fname = opts.tags_source
438
  if fname is None:
439
    return
440
  if fname == "-":
441
    new_fh = sys.stdin
442
  else:
443
    new_fh = open(fname, "r")
444
  new_data = []
445
  try:
446
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
447
    # because of python bug 1633941
448
    while True:
449
      line = new_fh.readline()
450
      if not line:
451
        break
452
      new_data.append(line.strip())
453
  finally:
454
    new_fh.close()
455
  args.extend(new_data)
456

    
457

    
458
def ListTags(opts, args):
459
  """List the tags on a given object.
460

461
  This is a generic implementation that knows how to deal with all
462
  three cases of tag objects (cluster, node, instance). The opts
463
  argument is expected to contain a tag_type field denoting what
464
  object type we work on.
465

466
  """
467
  kind, name = _ExtractTagsObject(opts, args)
468
  cl = GetClient(query=True)
469
  result = cl.QueryTags(kind, name)
470
  result = list(result)
471
  result.sort()
472
  for tag in result:
473
    ToStdout(tag)
474

    
475

    
476
def AddTags(opts, args):
477
  """Add tags on a given object.
478

479
  This is a generic implementation that knows how to deal with all
480
  three cases of tag objects (cluster, node, instance). The opts
481
  argument is expected to contain a tag_type field denoting what
482
  object type we work on.
483

484
  """
485
  kind, name = _ExtractTagsObject(opts, args)
486
  _ExtendTags(opts, args)
487
  if not args:
488
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
489
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
490
  SubmitOrSend(op, opts)
491

    
492

    
493
def RemoveTags(opts, args):
494
  """Remove tags from a given object.
495

496
  This is a generic implementation that knows how to deal with all
497
  three cases of tag objects (cluster, node, instance). The opts
498
  argument is expected to contain a tag_type field denoting what
499
  object type we work on.
500

501
  """
502
  kind, name = _ExtractTagsObject(opts, args)
503
  _ExtendTags(opts, args)
504
  if not args:
505
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
506
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
507
  SubmitOrSend(op, opts)
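# Illustrative usage note: ListTags, AddTags and RemoveTags expect the calling
# command to set opts.tag_type (e.g. constants.TAG_INSTANCE) and, for the
# non-cluster kinds, to pass the object name as the first positional argument;
# e.g. args = ["instance1.example.com", "webserver"] (hypothetical names)
# would tag that instance with "webserver". opts.tags_source may name a file
# ("-" for stdin) whose lines _ExtendTags appends to the tag list.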
508

    
509

    
510
def check_unit(option, opt, value): # pylint: disable=W0613
511
  """OptParsers custom converter for units.
512

513
  """
514
  try:
515
    return utils.ParseUnit(value)
516
  except errors.UnitParseError, err:
517
    raise OptionValueError("option %s: %s" % (opt, err))
518

    
519

    
520
def _SplitKeyVal(opt, data):
521
  """Convert a KeyVal string into a dict.
522

523
  This function will convert a key=val[,...] string into a dict. Empty
524
  values will be converted specially: keys which have the prefix 'no_'
525
  will have the value=False and the prefix stripped, the others will
526
  have value=True.
527

528
  @type opt: string
529
  @param opt: a string holding the option name for which we process the
530
      data, used in building error messages
531
  @type data: string
532
  @param data: a string of the format key=val,key=val,...
533
  @rtype: dict
534
  @return: {key=val, key=val}
535
  @raises errors.ParameterError: if there are duplicate keys
536

537
  """
538
  kv_dict = {}
539
  if data:
540
    for elem in utils.UnescapeAndSplit(data, sep=","):
541
      if "=" in elem:
542
        key, val = elem.split("=", 1)
543
      else:
544
        if elem.startswith(NO_PREFIX):
545
          key, val = elem[len(NO_PREFIX):], False
546
        elif elem.startswith(UN_PREFIX):
547
          key, val = elem[len(UN_PREFIX):], None
548
        else:
549
          key, val = elem, True
550
      if key in kv_dict:
551
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
552
                                    (key, opt))
553
      kv_dict[key] = val
554
  return kv_dict
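# Illustrative example, derived from the parsing rules above:
#   _SplitKeyVal("-B", "mem=512,no_auto_balance,-vcpus")
# would return {"mem": "512", "auto_balance": False, "vcpus": None}: explicit
# key=val pairs keep their string value, a "no_" prefix maps to False and a
# leading "-" maps to None.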
555

    
556

    
557
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
558
  """Custom parser for ident:key=val,key=val options.
559

560
  This will store the parsed values as a tuple (ident, {key: val}). As such,
561
  multiple uses of this option via action=append is possible.
562

563
  """
564
  if ":" not in value:
565
    ident, rest = value, ""
566
  else:
567
    ident, rest = value.split(":", 1)
568

    
569
  if ident.startswith(NO_PREFIX):
570
    if rest:
571
      msg = "Cannot pass options when removing parameter groups: %s" % value
572
      raise errors.ParameterError(msg)
573
    retval = (ident[len(NO_PREFIX):], False)
574
  elif (ident.startswith(UN_PREFIX) and
575
        (len(ident) <= len(UN_PREFIX) or
576
         not ident[len(UN_PREFIX)][0].isdigit())):
577
    if rest:
578
      msg = "Cannot pass options when removing parameter groups: %s" % value
579
      raise errors.ParameterError(msg)
580
    retval = (ident[len(UN_PREFIX):], None)
581
  else:
582
    kv_dict = _SplitKeyVal(opt, rest)
583
    retval = (ident, kv_dict)
584
  return retval
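# Illustrative example, based on the logic above (the unused option object is
# passed as None):
#   check_ident_key_val(None, "-H", "kvm:kernel_path=/vmlinuz,acpi")
# returns ("kvm", {"kernel_path": "/vmlinuz", "acpi": True}), while
#   check_ident_key_val(None, "-D", "no_drbd")
# returns ("drbd", False), i.e. "remove this parameter group".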
585

    
586

    
587
def check_key_val(option, opt, value):  # pylint: disable=W0613
588
  """Custom parser class for key=val,key=val options.
589

590
  This will store the parsed values as a dict {key: val}.
591

592
  """
593
  return _SplitKeyVal(opt, value)
594

    
595

    
596
def check_bool(option, opt, value): # pylint: disable=W0613
597
  """Custom parser for yes/no options.
598

599
  This will store the parsed value as either True or False.
600

601
  """
602
  value = value.lower()
603
  if value == constants.VALUE_FALSE or value == "no":
604
    return False
605
  elif value == constants.VALUE_TRUE or value == "yes":
606
    return True
607
  else:
608
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
609

    
610

    
611
def check_list(option, opt, value): # pylint: disable=W0613
612
  """Custom parser for comma-separated lists.
613

614
  """
615
  # we have to make this explicit check since "".split(",") is [""],
616
  # not an empty list :(
617
  if not value:
618
    return []
619
  else:
620
    return utils.UnescapeAndSplit(value)
621

    
622

    
623
def check_maybefloat(option, opt, value): # pylint: disable=W0613
624
  """Custom parser for float numbers which might be also defaults.
625

626
  """
627
  value = value.lower()
628

    
629
  if value == constants.VALUE_DEFAULT:
630
    return value
631
  else:
632
    return float(value)
633

    
634

    
635
# completion_suggestion is normally a list. Using numeric values not evaluating
636
# to False for dynamic completion.
637
(OPT_COMPL_MANY_NODES,
638
 OPT_COMPL_ONE_NODE,
639
 OPT_COMPL_ONE_INSTANCE,
640
 OPT_COMPL_ONE_OS,
641
 OPT_COMPL_ONE_IALLOCATOR,
642
 OPT_COMPL_INST_ADD_NODES,
643
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
644

    
645
OPT_COMPL_ALL = frozenset([
646
  OPT_COMPL_MANY_NODES,
647
  OPT_COMPL_ONE_NODE,
648
  OPT_COMPL_ONE_INSTANCE,
649
  OPT_COMPL_ONE_OS,
650
  OPT_COMPL_ONE_IALLOCATOR,
651
  OPT_COMPL_INST_ADD_NODES,
652
  OPT_COMPL_ONE_NODEGROUP,
653
  ])
654

    
655

    
656
class CliOption(Option):
657
  """Custom option class for optparse.
658

659
  """
660
  ATTRS = Option.ATTRS + [
661
    "completion_suggest",
662
    ]
663
  TYPES = Option.TYPES + (
664
    "identkeyval",
665
    "keyval",
666
    "unit",
667
    "bool",
668
    "list",
669
    "maybefloat",
670
    )
671
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
672
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
673
  TYPE_CHECKER["keyval"] = check_key_val
674
  TYPE_CHECKER["unit"] = check_unit
675
  TYPE_CHECKER["bool"] = check_bool
676
  TYPE_CHECKER["list"] = check_list
677
  TYPE_CHECKER["maybefloat"] = check_maybefloat
678

    
679

    
680
# optparse.py sets make_option, so we do it for our own option class, too
681
cli_option = CliOption
682

    
683

    
684
_YORNO = "yes|no"
685

    
686
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
687
                       help="Increase debugging level")
688

    
689
NOHDR_OPT = cli_option("--no-headers", default=False,
690
                       action="store_true", dest="no_headers",
691
                       help="Don't display column headers")
692

    
693
SEP_OPT = cli_option("--separator", default=None,
694
                     action="store", dest="separator",
695
                     help=("Separator between output fields"
696
                           " (defaults to one space)"))
697

    
698
USEUNITS_OPT = cli_option("--units", default=None,
699
                          dest="units", choices=("h", "m", "g", "t"),
700
                          help="Specify units for output (one of h/m/g/t)")
701

    
702
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
703
                        type="string", metavar="FIELDS",
704
                        help="Comma separated list of output fields")
705

    
706
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
707
                       default=False, help="Force the operation")
708

    
709
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
710
                         default=False, help="Do not require confirmation")
711

    
712
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
713
                                  action="store_true", default=False,
714
                                  help=("Ignore offline nodes and do as much"
715
                                        " as possible"))
716

    
717
TAG_ADD_OPT = cli_option("--tags", dest="tags",
718
                         default=None, help="Comma-separated list of instance"
719
                                            " tags")
720

    
721
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
722
                         default=None, help="File with tag names")
723

    
724
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
725
                        default=False, action="store_true",
726
                        help=("Submit the job and return the job ID, but"
727
                              " don't wait for the job to finish"))
728

    
729
SYNC_OPT = cli_option("--sync", dest="do_locking",
730
                      default=False, action="store_true",
731
                      help=("Grab locks while doing the queries"
732
                            " in order to ensure more consistent results"))
733

    
734
DRY_RUN_OPT = cli_option("--dry-run", default=False,
735
                         action="store_true",
736
                         help=("Do not execute the operation, just run the"
737
                               " check steps and verify if it could be"
738
                               " executed"))
739

    
740
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
741
                         action="store_true",
742
                         help="Increase the verbosity of the operation")
743

    
744
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
745
                              action="store_true", dest="simulate_errors",
746
                              help="Debugging option that makes the operation"
747
                              " treat most runtime checks as failed")
748

    
749
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
750
                        default=True, action="store_false",
751
                        help="Don't wait for sync (DANGEROUS!)")
752

    
753
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
754
                        default=False, action="store_true",
755
                        help="Wait for disks to sync")
756

    
757
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
758
                             action="store_true", default=False,
759
                             help="Enable offline instance")
760

    
761
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
762
                              action="store_true", default=False,
763
                              help="Disable down instance")
764

    
765
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
766
                               help=("Custom disk setup (%s)" %
767
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
768
                               default=None, metavar="TEMPL",
769
                               choices=list(constants.DISK_TEMPLATES))
770

    
771
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
772
                        help="Do not create any network cards for"
773
                        " the instance")
774

    
775
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
776
                               help="Relative path under default cluster-wide"
777
                               " file storage dir to store file-based disks",
778
                               default=None, metavar="<DIR>")
779

    
780
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
781
                                  help="Driver to use for image files",
782
                                  default="loop", metavar="<DRIVER>",
783
                                  choices=list(constants.FILE_DRIVER))
784

    
785
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
786
                            help="Select nodes for the instance automatically"
787
                            " using the <NAME> iallocator plugin",
788
                            default=None, type="string",
789
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
790

    
791
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
792
                                    metavar="<NAME>",
793
                                    help="Set the default instance"
794
                                    " allocator plugin",
795
                                    default=None, type="string",
796
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
797

    
798
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
799
                    metavar="<os>",
800
                    completion_suggest=OPT_COMPL_ONE_OS)
801

    
802
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
803
                          type="keyval", default={},
804
                          help="OS parameters")
805

    
806
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
807
                               action="store_true", default=False,
808
                               help="Force an unknown variant")
809

    
810
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
811
                            action="store_true", default=False,
812
                            help="Do not install the OS (will"
813
                            " enable no-start)")
814

    
815
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
816
                                dest="allow_runtime_chgs",
817
                                default=True, action="store_false",
818
                                help="Don't allow runtime changes")
819

    
820
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
821
                         type="keyval", default={},
822
                         help="Backend parameters")
823

    
824
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
825
                        default={}, dest="hvparams",
826
                        help="Hypervisor parameters")
827

    
828
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
829
                             help="Disk template parameters, in the format"
830
                             " template:option=value,option=value,...",
831
                             type="identkeyval", action="append", default=[])
832

    
833
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
834
                                 type="keyval", default={},
835
                                 help="Memory size specs: list of key=value,"
836
                                " where key is one of min, max, std"
837
                                 " (in MB or using a unit)")
838

    
839
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
840
                                 type="keyval", default={},
841
                                 help="CPU count specs: list of key=value,"
842
                                 " where key is one of min, max, std")
843

    
844
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
845
                                  dest="ispecs_disk_count",
846
                                  type="keyval", default={},
847
                                  help="Disk count specs: list of key=value,"
848
                                  " where key is one of min, max, std")
849

    
850
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
851
                                 type="keyval", default={},
852
                                 help="Disk size specs: list of key=value,"
853
                                 " where key is one of min, max, std"
854
                                 " (in MB or using a unit)")
855

    
856
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
857
                                 type="keyval", default={},
858
                                 help="NIC count specs: list of key=value,"
859
                                 " where key is one of min, max, std")
860

    
861
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
862
                                    dest="ipolicy_disk_templates",
863
                                    type="list", default=None,
864
                                    help="Comma-separated list of"
865
                                    " enabled disk templates")
866

    
867
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
868
                                 dest="ipolicy_vcpu_ratio",
869
                                 type="maybefloat", default=None,
870
                                 help="The maximum allowed vcpu-to-cpu ratio")
871

    
872
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
873
                                   dest="ipolicy_spindle_ratio",
874
                                   type="maybefloat", default=None,
875
                                   help=("The maximum allowed instances to"
876
                                         " spindle ratio"))
877

    
878
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
879
                            help="Hypervisor and hypervisor options, in the"
880
                            " format hypervisor:option=value,option=value,...",
881
                            default=None, type="identkeyval")
882

    
883
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
884
                        help="Hypervisor and hypervisor options, in the"
885
                        " format hypervisor:option=value,option=value,...",
886
                        default=[], action="append", type="identkeyval")
887

    
888
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
889
                           action="store_false",
890
                           help="Don't check that the instance's IP"
891
                           " is alive")
892

    
893
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
894
                             default=True, action="store_false",
895
                             help="Don't check that the instance's name"
896
                             " is resolvable")
897

    
898
NET_OPT = cli_option("--net",
899
                     help="NIC parameters", default=[],
900
                     dest="nics", action="append", type="identkeyval")
901

    
902
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
903
                      dest="disks", action="append", type="identkeyval")
904

    
905
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
906
                         help="Comma-separated list of disks"
907
                         " indices to act on (e.g. 0,2) (optional,"
908
                         " defaults to all disks)")
909

    
910
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
911
                         help="Enforces a single-disk configuration using the"
912
                         " given disk size, in MiB unless a suffix is used",
913
                         default=None, type="unit", metavar="<size>")
914

    
915
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
916
                                dest="ignore_consistency",
917
                                action="store_true", default=False,
918
                                help="Ignore the consistency of the disks on"
919
                                " the secondary")
920

    
921
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
922
                                dest="allow_failover",
923
                                action="store_true", default=False,
924
                                help="If migration is not possible fallback to"
925
                                     " failover")
926

    
927
NONLIVE_OPT = cli_option("--non-live", dest="live",
928
                         default=True, action="store_false",
929
                         help="Do a non-live migration (this usually means"
930
                         " freeze the instance, save the state, transfer and"
931
                         " only then resume running on the secondary node)")
932

    
933
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
934
                                default=None,
935
                                choices=list(constants.HT_MIGRATION_MODES),
936
                                help="Override default migration mode (choose"
937
                                " either live or non-live")
938

    
939
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
940
                                help="Target node and optional secondary node",
941
                                metavar="<pnode>[:<snode>]",
942
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
943

    
944
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
945
                           action="append", metavar="<node>",
946
                           help="Use only this node (can be used multiple"
947
                           " times, if not given defaults to all nodes)",
948
                           completion_suggest=OPT_COMPL_ONE_NODE)
949

    
950
NODEGROUP_OPT_NAME = "--node-group"
951
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
952
                           dest="nodegroup",
953
                           help="Node group (name or uuid)",
954
                           metavar="<nodegroup>",
955
                           default=None, type="string",
956
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
957

    
958
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
959
                             metavar="<node>",
960
                             completion_suggest=OPT_COMPL_ONE_NODE)
961

    
962
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
963
                         action="store_false",
964
                         help="Don't start the instance after creation")
965

    
966
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
967
                         action="store_true", default=False,
968
                         help="Show command instead of executing it")
969

    
970
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
971
                         default=False, action="store_true",
972
                         help="Instead of performing the migration, try to"
973
                         " recover from a failed cleanup. This is safe"
974
                         " to run even if the instance is healthy, but it"
975
                         " will create extra replication traffic and "
976
                         " disrupt briefly the replication (like during the"
977
                         " migration")
978

    
979
STATIC_OPT = cli_option("-s", "--static", dest="static",
980
                        action="store_true", default=False,
981
                        help="Only show configuration data, not runtime data")
982

    
983
ALL_OPT = cli_option("--all", dest="show_all",
984
                     default=False, action="store_true",
985
                     help="Show info on all instances on the cluster."
986
                     " This can take a long time to run, use wisely")
987

    
988
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
989
                           action="store_true", default=False,
990
                           help="Interactive OS reinstall, lists available"
991
                           " OS templates for selection")
992

    
993
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
994
                                 action="store_true", default=False,
995
                                 help="Remove the instance from the cluster"
996
                                 " configuration even if there are failures"
997
                                 " during the removal process")
998

    
999
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1000
                                        dest="ignore_remove_failures",
1001
                                        action="store_true", default=False,
1002
                                        help="Remove the instance from the"
1003
                                        " cluster configuration even if there"
1004
                                        " are failures during the removal"
1005
                                        " process")
1006

    
1007
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1008
                                 action="store_true", default=False,
1009
                                 help="Remove the instance from the cluster")
1010

    
1011
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1012
                               help="Specifies the new node for the instance",
1013
                               metavar="NODE", default=None,
1014
                               completion_suggest=OPT_COMPL_ONE_NODE)
1015

    
1016
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1017
                               help="Specifies the new secondary node",
1018
                               metavar="NODE", default=None,
1019
                               completion_suggest=OPT_COMPL_ONE_NODE)
1020

    
1021
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1022
                            default=False, action="store_true",
1023
                            help="Replace the disk(s) on the primary"
1024
                                 " node (applies only to internally mirrored"
1025
                                 " disk templates, e.g. %s)" %
1026
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1027

    
1028
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1029
                              default=False, action="store_true",
1030
                              help="Replace the disk(s) on the secondary"
1031
                                   " node (applies only to internally mirrored"
1032
                                   " disk templates, e.g. %s)" %
1033
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1034

    
1035
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1036
                              default=False, action="store_true",
1037
                              help="Lock all nodes and auto-promote as needed"
1038
                              " to MC status")
1039

    
1040
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1041
                              default=False, action="store_true",
1042
                              help="Automatically replace faulty disks"
1043
                                   " (applies only to internally mirrored"
1044
                                   " disk templates, e.g. %s)" %
1045
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1046

    
1047
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1048
                             default=False, action="store_true",
1049
                             help="Ignore current recorded size"
1050
                             " (useful for forcing activation when"
1051
                             " the recorded size is wrong)")
1052

    
1053
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1054
                          metavar="<node>",
1055
                          completion_suggest=OPT_COMPL_ONE_NODE)
1056

    
1057
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1058
                         metavar="<dir>")
1059

    
1060
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1061
                              help="Specify the secondary ip for the node",
1062
                              metavar="ADDRESS", default=None)
1063

    
1064
READD_OPT = cli_option("--readd", dest="readd",
1065
                       default=False, action="store_true",
1066
                       help="Readd old node after replacing it")
1067

    
1068
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1069
                                default=True, action="store_false",
1070
                                help="Disable SSH key fingerprint checking")
1071

    
1072
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1073
                                 default=False, action="store_true",
1074
                                 help="Force the joining of a node")
1075

    
1076
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1077
                    type="bool", default=None, metavar=_YORNO,
1078
                    help="Set the master_candidate flag on the node")
1079

    
1080
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1081
                         type="bool", default=None,
1082
                         help=("Set the offline flag on the node"
1083
                               " (cluster does not communicate with offline"
1084
                               " nodes)"))
1085

    
1086
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1087
                         type="bool", default=None,
1088
                         help=("Set the drained flag on the node"
1089
                               " (excluded from allocation operations)"))
1090

    
1091
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1092
                              type="bool", default=None, metavar=_YORNO,
1093
                              help="Set the master_capable flag on the node")
1094

    
1095
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1096
                          type="bool", default=None, metavar=_YORNO,
1097
                          help="Set the vm_capable flag on the node")
1098

    
1099
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1100
                             type="bool", default=None, metavar=_YORNO,
1101
                             help="Set the allocatable flag on a volume")
1102

    
1103
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1104
                               help="Disable support for lvm based instances"
1105
                               " (cluster-wide)",
1106
                               action="store_false", default=True)
1107

    
1108
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1109
                            dest="enabled_hypervisors",
1110
                            help="Comma-separated list of hypervisors",
1111
                            type="string", default=None)
1112

    
1113
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1114
                            type="keyval", default={},
1115
                            help="NIC parameters")
1116

    
1117
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1118
                         dest="candidate_pool_size", type="int",
1119
                         help="Set the candidate pool size")
1120

    
1121
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1122
                         help=("Enables LVM and specifies the volume group"
1123
                               " name (cluster-wide) for disk allocation"
1124
                               " [%s]" % constants.DEFAULT_VG),
1125
                         metavar="VG", default=None)
1126

    
1127
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1128
                          help="Destroy cluster", action="store_true")
1129

    
1130
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1131
                          help="Skip node agreement check (dangerous)",
1132
                          action="store_true", default=False)
1133

    
1134
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1135
                            help="Specify the mac prefix for the instance IP"
1136
                            " addresses, in the format XX:XX:XX",
1137
                            metavar="PREFIX",
1138
                            default=None)
1139

    
1140
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1141
                               help="Specify the node interface (cluster-wide)"
1142
                               " on which the master IP address will be added"
1143
                               " (cluster init default: %s)" %
1144
                               constants.DEFAULT_BRIDGE,
1145
                               metavar="NETDEV",
1146
                               default=None)
1147

    
1148
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1149
                                help="Specify the netmask of the master IP",
1150
                                metavar="NETMASK",
1151
                                default=None)
1152

    
1153
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1154
                                     dest="use_external_mip_script",
1155
                                     help="Specify whether to run a"
1156
                                     " user-provided script for the master"
1157
                                     " IP address turnup and"
1158
                                     " turndown operations",
1159
                                     type="bool", metavar=_YORNO, default=None)
1160

    
1161
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1162
                                help="Specify the default directory (cluster-"
1163
                                "wide) for storing the file-based disks [%s]" %
1164
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1165
                                metavar="DIR",
1166
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1167

    
1168
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1169
  "--shared-file-storage-dir",
1170
  dest="shared_file_storage_dir",
1171
  help="Specify the default directory (cluster-wide) for storing the"
1172
  " shared file-based disks [%s]" %
1173
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1174
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1175

    
1176
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1177
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1178
                                   action="store_false", default=True)
1179

    
1180
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1181
                                    help="Don't initialize SSH keys",
1182
                                    action="store_false", default=True)
1183

    
1184
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1185
                             help="Enable parseable error messages",
1186
                             action="store_true", default=False)
1187

    
1188
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1189
                          help="Skip N+1 memory redundancy tests",
1190
                          action="store_true", default=False)
1191

    
1192
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1193
                             help="Type of reboot: soft/hard/full",
1194
                             default=constants.INSTANCE_REBOOT_HARD,
1195
                             metavar="<REBOOT>",
1196
                             choices=list(constants.REBOOT_TYPES))
1197

    
1198
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1199
                                    dest="ignore_secondaries",
1200
                                    default=False, action="store_true",
1201
                                    help="Ignore errors from secondaries")
1202

    
1203
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1204
                            action="store_false", default=True,
1205
                            help="Don't shutdown the instance (unsafe)")
1206

    
1207
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1208
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1209
                         help="Maximum time to wait")
1210

    
1211
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1212
                                  dest="shutdown_timeout", type="int",
1213
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1214
                                  help="Maximum time to wait for instance"
1215
                                  " shutdown")
1216

    
1217
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1218
                          default=None,
1219
                          help=("Number of seconds between repetions of the"
1220
                                " command"))
1221

    
1222
EARLY_RELEASE_OPT = cli_option("--early-release",
1223
                               dest="early_release", default=False,
1224
                               action="store_true",
1225
                               help="Release the locks on the secondary"
1226
                               " node(s) early")
1227

    
1228
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1229
                                  dest="new_cluster_cert",
1230
                                  default=False, action="store_true",
1231
                                  help="Generate a new cluster certificate")
1232

    
1233
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1234
                           default=None,
1235
                           help="File containing new RAPI certificate")
1236

    
1237
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1238
                               default=None, action="store_true",
1239
                               help=("Generate a new self-signed RAPI"
1240
                                     " certificate"))
1241

    
1242
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1243
                            default=None,
1244
                            help="File containing new SPICE certificate")
1245

    
1246
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1247
                              default=None,
1248
                              help="File containing the certificate of the CA"
1249
                              " which signed the SPICE certificate")
1250

    
1251
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1252
                                dest="new_spice_cert", default=None,
1253
                                action="store_true",
1254
                                help=("Generate a new self-signed SPICE"
1255
                                      " certificate"))
1256

    
1257
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1258
                                    dest="new_confd_hmac_key",
1259
                                    default=False, action="store_true",
1260
                                    help=("Create a new HMAC key for %s" %
1261
                                          constants.CONFD))
1262

    
1263
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1264
                                       dest="cluster_domain_secret",
1265
                                       default=None,
1266
                                       help=("Load new new cluster domain"
1267
                                             " secret from file"))
1268

    
1269
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1270
                                           dest="new_cluster_domain_secret",
1271
                                           default=False, action="store_true",
1272
                                           help=("Create a new cluster domain"
1273
                                                 " secret"))
1274

    
1275
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1276
                              dest="use_replication_network",
1277
                              help="Whether to use the replication network"
1278
                              " for talking to the nodes",
1279
                              action="store_true", default=False)
1280

    
1281
MAINTAIN_NODE_HEALTH_OPT = \
1282
    cli_option("--maintain-node-health", dest="maintain_node_health",
1283
               metavar=_YORNO, default=None, type="bool",
1284
               help="Configure the cluster to automatically maintain node"
1285
               " health, by shutting down unknown instances, shutting down"
1286
               " unknown DRBD devices, etc.")
1287

    
1288
IDENTIFY_DEFAULTS_OPT = \
1289
    cli_option("--identify-defaults", dest="identify_defaults",
1290
               default=False, action="store_true",
1291
               help="Identify which saved instance parameters are equal to"
1292
               " the current cluster defaults and set them as such, instead"
1293
               " of marking them as overridden")
1294

    
1295
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1296
                         action="store", dest="uid_pool",
1297
                         help=("A list of user-ids or user-id"
1298
                               " ranges separated by commas"))
1299

    
1300
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1301
                          action="store", dest="add_uids",
1302
                          help=("A list of user-ids or user-id"
1303
                                " ranges separated by commas, to be"
1304
                                " added to the user-id pool"))
1305

    
1306
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1307
                             action="store", dest="remove_uids",
1308
                             help=("A list of user-ids or user-id"
1309
                                   " ranges separated by commas, to be"
1310
                                   " removed from the user-id pool"))
1311

    
1312
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1313
                              action="store", dest="reserved_lvs",
1314
                              help=("A comma-separated list of reserved"
1315
                                    " logical volumes names, that will be"
1316
                                    " ignored by cluster verify"))
1317

    
1318
ROMAN_OPT = cli_option("--roman",
1319
                       dest="roman_integers", default=False,
1320
                       action="store_true",
1321
                       help="Use roman numbers for positive integers")
1322

    
1323
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1324
                             action="store", default=None,
1325
                             help="Specifies usermode helper for DRBD")
1326

    
1327
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1328
                                action="store_false", default=True,
1329
                                help="Disable support for DRBD")
1330

    
1331
PRIMARY_IP_VERSION_OPT = \
1332
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1333
               action="store", dest="primary_ip_version",
1334
               metavar="%d|%d" % (constants.IP4_VERSION,
1335
                                  constants.IP6_VERSION),
1336
               help="Cluster-wide IP version for primary IP")
1337

    
1338

    
1339
def _PriorityOptionCb(option, _, value, parser):
1340
  """Callback for processing C{--priority} option.
1341

1342
  """
1343
  value = _PRIONAME_TO_VALUE[value]
1344

    
1345
  setattr(parser.values, option.dest, value)
1346

    
1347

    
1348
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1349
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1350
                          choices=_PRIONAME_TO_VALUE.keys(),
1351
                          action="callback", type="choice",
1352
                          callback=_PriorityOptionCb,
1353
                          help="Priority for opcode processing")
1354

    
1355
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1356
                        type="bool", default=None, metavar=_YORNO,
1357
                        help="Sets the hidden flag on the OS")
1358

    
1359
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1360
                        type="bool", default=None, metavar=_YORNO,
1361
                        help="Sets the blacklisted flag on the OS")
1362

    
1363
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1364
                                     type="bool", metavar=_YORNO,
1365
                                     dest="prealloc_wipe_disks",
1366
                                     help=("Wipe disks prior to instance"
1367
                                           " creation"))
1368

    
1369
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1370
                             type="keyval", default=None,
1371
                             help="Node parameters")
1372

    
1373
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1374
                              action="store", metavar="POLICY", default=None,
1375
                              help="Allocation policy for the node group")
1376

    
1377
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1378
                              type="bool", metavar=_YORNO,
1379
                              dest="node_powered",
1380
                              help="Specify if the SoR for node is powered")
1381

    
1382
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1383
                             default=constants.OOB_TIMEOUT,
1384
                             help="Maximum time to wait for out-of-band helper")
1385

    
1386
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1387
                             default=constants.OOB_POWER_DELAY,
1388
                             help="Time in seconds to wait between power-ons")
1389

    
1390
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1391
                              action="store_true", default=False,
1392
                              help=("Whether command argument should be treated"
1393
                                    " as filter"))
1394

    
1395
NO_REMEMBER_OPT = cli_option("--no-remember",
1396
                             dest="no_remember",
1397
                             action="store_true", default=False,
1398
                             help="Perform but do not record the change"
1399
                             " in the configuration")
1400

    
1401
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1402
                              default=False, action="store_true",
1403
                              help="Evacuate primary instances only")
1404

    
1405
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1406
                                default=False, action="store_true",
1407
                                help="Evacuate secondary instances only"
1408
                                     " (applies only to internally mirrored"
1409
                                     " disk templates, e.g. %s)" %
1410
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1411

    
1412
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1413
                                action="store_true", default=False,
1414
                                help="Pause instance at startup")
1415

    
1416
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1417
                          help="Destination node group (name or uuid)",
1418
                          default=None, action="append",
1419
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1420

    
1421
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1422
                               action="append", dest="ignore_errors",
1423
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1424
                               help="Error code to be ignored")
1425

    
1426
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1427
                            action="append",
1428
                            help=("Specify disk state information in the"
1429
                                  " format"
1430
                                  " storage_type/identifier:option=value,...;"
1431
                                  " note this is unused for now"),
1432
                            type="identkeyval")
1433

    
1434
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1435
                          action="append",
1436
                          help=("Specify hypervisor state information in the"
1437
                                " format hypervisor:option=value,...;"
1438
                                " note this is unused for now"),
1439
                          type="identkeyval")
1440

    
1441
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1442
                                action="store_true", default=False,
1443
                                help="Ignore instance policy violations")
1444

    
1445
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1446
                             help="Sets the instance's runtime memory,"
1447
                             " ballooning it up or down to the new value",
1448
                             default=None, type="unit", metavar="<size>")
1449

    
1450
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1451
                          action="store_true", default=False,
1452
                          help="Marks the grow as absolute instead of the"
1453
                          " (default) relative mode")
1454

    
1455
#: Options provided by all commands
1456
COMMON_OPTS = [DEBUG_OPT]
1457

    
1458
# Common options for creating instances. The "add" and "import" commands then
1459
# add their own specific ones.
1460
COMMON_CREATE_OPTS = [
1461
  BACKEND_OPT,
1462
  DISK_OPT,
1463
  DISK_TEMPLATE_OPT,
1464
  FILESTORE_DIR_OPT,
1465
  FILESTORE_DRIVER_OPT,
1466
  HYPERVISOR_OPT,
1467
  IALLOCATOR_OPT,
1468
  NET_OPT,
1469
  NODE_PLACEMENT_OPT,
1470
  NOIPCHECK_OPT,
1471
  NONAMECHECK_OPT,
1472
  NONICS_OPT,
1473
  NWSYNC_OPT,
1474
  OSPARAMS_OPT,
1475
  OS_SIZE_OPT,
1476
  SUBMIT_OPT,
1477
  TAG_ADD_OPT,
1478
  DRY_RUN_OPT,
1479
  PRIORITY_OPT,
1480
  ]
1481

    
1482
# common instance policy options
1483
INSTANCE_POLICY_OPTS = [
1484
  SPECS_CPU_COUNT_OPT,
1485
  SPECS_DISK_COUNT_OPT,
1486
  SPECS_DISK_SIZE_OPT,
1487
  SPECS_MEM_SIZE_OPT,
1488
  SPECS_NIC_COUNT_OPT,
1489
  IPOLICY_DISK_TEMPLATES,
1490
  IPOLICY_VCPU_RATIO,
1491
  IPOLICY_SPINDLE_RATIO,
1492
  ]
1493

    
1494

    
1495
class _ShowUsage(Exception):
1496
  """Exception class for L{_ParseArgs}.
1497

1498
  """
1499
  def __init__(self, exit_error):
1500
    """Initializes instances of this class.
1501

1502
    @type exit_error: bool
1503
    @param exit_error: Whether to report failure on exit
1504

1505
    """
1506
    Exception.__init__(self)
1507
    self.exit_error = exit_error
1508

    
1509

    
1510
class _ShowVersion(Exception):
1511
  """Exception class for L{_ParseArgs}.
1512

1513
  """
1514

    
1515

    
1516
def _ParseArgs(binary, argv, commands, aliases, env_override):
1517
  """Parser for the command line arguments.
1518

1519
  This function parses the arguments and returns the function which
1520
  must be executed together with its (modified) arguments.
1521

1522
  @param binary: Script name
1523
  @param argv: Command line arguments
1524
  @param commands: Dictionary containing command definitions
1525
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1526
  @param env_override: list of env variables allowed for default args
1527
  @raise _ShowUsage: If usage description should be shown
1528
  @raise _ShowVersion: If version should be shown
1529

1530
  """
1531
  assert not (env_override - set(commands))
1532
  assert not (set(aliases.keys()) & set(commands.keys()))
1533

    
1534
  if len(argv) > 1:
1535
    cmd = argv[1]
1536
  else:
1537
    # No option or command given
1538
    raise _ShowUsage(exit_error=True)
1539

    
1540
  if cmd == "--version":
1541
    raise _ShowVersion()
1542
  elif cmd == "--help":
1543
    raise _ShowUsage(exit_error=False)
1544
  elif not (cmd in commands or cmd in aliases):
1545
    raise _ShowUsage(exit_error=True)
1546

    
1547
  # get command, unalias it, and look it up in commands
1548
  if cmd in aliases:
1549
    if aliases[cmd] not in commands:
1550
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1551
                                   " command '%s'" % (cmd, aliases[cmd]))
1552

    
1553
    cmd = aliases[cmd]
1554

    
1555
  if cmd in env_override:
1556
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1557
    env_args = os.environ.get(args_env_name)
1558
    if env_args:
1559
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1560

    
1561
  func, args_def, parser_opts, usage, description = commands[cmd]
1562
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1563
                        description=description,
1564
                        formatter=TitledHelpFormatter(),
1565
                        usage="%%prog %s %s" % (cmd, usage))
1566
  parser.disable_interspersed_args()
1567
  options, args = parser.parse_args(args=argv[2:])
1568

    
1569
  if not _CheckArguments(cmd, args_def, args):
1570
    return None, None, None
1571

    
1572
  return func, options, args
1573
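# Illustrative sketch of one "commands" entry as unpacked above; the command
# name and the callable are hypothetical, only the tuple layout
# (function, argument definition, extra options, usage suffix, description)
# is taken from the unpacking in _ParseArgs:
#   "info": (ShowSomeInfoFn, [], [ROMAN_OPT], "", "Show some information")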

    
1574

    
1575
def _FormatUsage(binary, commands):
1576
  """Generates a nice description of all commands.
1577

1578
  @param binary: Script name
1579
  @param commands: Dictionary containing command definitions
1580

1581
  """
1582
  # compute the max line length for cmd + usage
1583
  mlen = min(60, max(map(len, commands)))
1584

    
1585
  yield "Usage: %s {command} [options...] [argument...]" % binary
1586
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1587
  yield ""
1588
  yield "Commands:"
1589

    
1590
  # and format a nice command list
1591
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1592
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1593
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1594
    for line in help_lines:
1595
      yield " %-*s   %s" % (mlen, "", line)
1596

    
1597
  yield ""
1598

    
1599

    
1600
def _CheckArguments(cmd, args_def, args):
1601
  """Verifies the arguments using the argument definition.
1602

1603
  Algorithm:
1604

1605
    1. Abort with error if values specified by user but none expected.
1606

1607
    1. For each argument in definition
1608

1609
      1. Keep running count of minimum number of values (min_count)
1610
      1. Keep running count of maximum number of values (max_count)
1611
      1. If it has an unlimited number of values
1612

1613
        1. Abort with error if it's not the last argument in the definition
1614

1615
    1. If last argument has limited number of values
1616

1617
      1. Abort with error if number of values doesn't match or is too large
1618

1619
    1. Abort with error if user didn't pass enough values (min_count)
1620

1621
  """
1622
  if args and not args_def:
1623
    ToStderr("Error: Command %s expects no arguments", cmd)
1624
    return False
1625

    
1626
  min_count = None
1627
  max_count = None
1628
  check_max = None
1629

    
1630
  last_idx = len(args_def) - 1
1631

    
1632
  for idx, arg in enumerate(args_def):
1633
    if min_count is None:
1634
      min_count = arg.min
1635
    elif arg.min is not None:
1636
      min_count += arg.min
1637

    
1638
    if max_count is None:
1639
      max_count = arg.max
1640
    elif arg.max is not None:
1641
      max_count += arg.max
1642

    
1643
    if idx == last_idx:
1644
      check_max = (arg.max is not None)
1645

    
1646
    elif arg.max is None:
1647
      raise errors.ProgrammerError("Only the last argument can have max=None")
1648

    
1649
  if check_max:
1650
    # Command with exact number of arguments
1651
    if (min_count is not None and max_count is not None and
1652
        min_count == max_count and len(args) != min_count):
1653
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1654
      return False
1655

    
1656
    # Command with limited number of arguments
1657
    if max_count is not None and len(args) > max_count:
1658
      ToStderr("Error: Command %s expects only %d argument(s)",
1659
               cmd, max_count)
1660
      return False
1661

    
1662
  # Command with some required arguments
1663
  if min_count is not None and len(args) < min_count:
1664
    ToStderr("Error: Command %s expects at least %d argument(s)",
1665
             cmd, min_count)
1666
    return False
1667

    
1668
  return True
1669

    
1670

    
1671
def SplitNodeOption(value):
1672
  """Splits the value of a --node option.
1673

1674
  """
1675
  if value and ":" in value:
1676
    return value.split(":", 1)
1677
  else:
1678
    return (value, None)
1679
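# Illustrative behaviour of SplitNodeOption (node names are hypothetical);
# note the asymmetric return types, a list when a secondary node is given
# and a tuple otherwise:
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)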

    
1680

    
1681
def CalculateOSNames(os_name, os_variants):
1682
  """Calculates all the names an OS can be called, according to its variants.
1683

1684
  @type os_name: string
1685
  @param os_name: base name of the os
1686
  @type os_variants: list or None
1687
  @param os_variants: list of supported variants
1688
  @rtype: list
1689
  @return: list of valid names
1690

1691
  """
1692
  if os_variants:
1693
    return ["%s+%s" % (os_name, v) for v in os_variants]
1694
  else:
1695
    return [os_name]
1696
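# Illustrative behaviour (the OS and variant names are hypothetical):
#   CalculateOSNames("debootstrap", ["minimal", "default"])
#     -> ["debootstrap+minimal", "debootstrap+default"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]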

    
1697

    
1698
def ParseFields(selected, default):
1699
  """Parses the values of "--field"-like options.
1700

1701
  @type selected: string or None
1702
  @param selected: User-selected options
1703
  @type default: list
1704
  @param default: Default fields
1705

1706
  """
1707
  if selected is None:
1708
    return default
1709

    
1710
  if selected.startswith("+"):
1711
    return default + selected[1:].split(",")
1712

    
1713
  return selected.split(",")
1714
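# Illustrative behaviour (the field names are hypothetical):
#   ParseFields(None, ["name", "status"])        -> ["name", "status"]
#   ParseFields("+disk_usage", ["name"])         -> ["name", "disk_usage"]
#   ParseFields("name,os", ["name", "status"])   -> ["name", "os"]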

    
1715

    
1716
UsesRPC = rpc.RunWithRPC
1717

    
1718

    
1719
def AskUser(text, choices=None):
1720
  """Ask the user a question.
1721

1722
  @param text: the question to ask
1723

1724
  @param choices: list of tuples (input_char, return_value,
1725
      description); if not given, it will default to: [('y', True,
1726
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1727
      note that the '?' char is reserved for help
1728

1729
  @return: one of the return values from the choices list; if input is
1730
      not possible (i.e. not running with a tty), we return the last
1731
      entry from the list
1732

1733
  """
1734
  if choices is None:
1735
    choices = [("y", True, "Perform the operation"),
1736
               ("n", False, "Do not perform the operation")]
1737
  if not choices or not isinstance(choices, list):
1738
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1739
  for entry in choices:
1740
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1741
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1742

    
1743
  answer = choices[-1][1]
1744
  new_text = []
1745
  for line in text.splitlines():
1746
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1747
  text = "\n".join(new_text)
1748
  try:
1749
    f = file("/dev/tty", "a+")
1750
  except IOError:
1751
    return answer
1752
  try:
1753
    chars = [entry[0] for entry in choices]
1754
    chars[-1] = "[%s]" % chars[-1]
1755
    chars.append("?")
1756
    maps = dict([(entry[0], entry[1]) for entry in choices])
1757
    while True:
1758
      f.write(text)
1759
      f.write("\n")
1760
      f.write("/".join(chars))
1761
      f.write(": ")
1762
      line = f.readline(2).strip().lower()
1763
      if line in maps:
1764
        answer = maps[line]
1765
        break
1766
      elif line == "?":
1767
        for entry in choices:
1768
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1769
        f.write("\n")
1770
        continue
1771
  finally:
1772
    f.close()
1773
  return answer
1774
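# Illustrative call with a custom choices list (prompt text and choices are
# hypothetical); the return value is the second element of the chosen tuple,
# and without a tty the last entry's value ("abort" here) is returned:
#   AskUser("Remove the instance?",
#           choices=[("y", True, "Remove it"),
#                    ("n", False, "Keep it"),
#                    ("a", "abort", "Abort the whole operation")])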

    
1775

    
1776
class JobSubmittedException(Exception):
1777
  """Job was submitted, client should exit.
1778

1779
  This exception has one argument, the ID of the job that was
1780
  submitted. The handler should print this ID.
1781

1782
  This is not an error, just a structured way to exit from clients.
1783

1784
  """
1785

    
1786

    
1787
def SendJob(ops, cl=None):
1788
  """Function to submit an opcode without waiting for the results.
1789

1790
  @type ops: list
1791
  @param ops: list of opcodes
1792
  @type cl: luxi.Client
1793
  @param cl: the luxi client to use for communicating with the master;
1794
             if None, a new client will be created
1795

1796
  """
1797
  if cl is None:
1798
    cl = GetClient()
1799

    
1800
  job_id = cl.SubmitJob(ops)
1801

    
1802
  return job_id
1803

    
1804

    
1805
def GenericPollJob(job_id, cbs, report_cbs):
1806
  """Generic job-polling function.
1807

1808
  @type job_id: number
1809
  @param job_id: Job ID
1810
  @type cbs: Instance of L{JobPollCbBase}
1811
  @param cbs: Data callbacks
1812
  @type report_cbs: Instance of L{JobPollReportCbBase}
1813
  @param report_cbs: Reporting callbacks
1814

1815
  """
1816
  prev_job_info = None
1817
  prev_logmsg_serial = None
1818

    
1819
  status = None
1820

    
1821
  while True:
1822
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1823
                                      prev_logmsg_serial)
1824
    if not result:
1825
      # job not found, go away!
1826
      raise errors.JobLost("Job with id %s lost" % job_id)
1827

    
1828
    if result == constants.JOB_NOTCHANGED:
1829
      report_cbs.ReportNotChanged(job_id, status)
1830

    
1831
      # Wait again
1832
      continue
1833

    
1834
    # Split result, a tuple of (field values, log entries)
1835
    (job_info, log_entries) = result
1836
    (status, ) = job_info
1837

    
1838
    if log_entries:
1839
      for log_entry in log_entries:
1840
        (serial, timestamp, log_type, message) = log_entry
1841
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1842
                                    log_type, message)
1843
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1844

    
1845
    # TODO: Handle canceled and archived jobs
1846
    elif status in (constants.JOB_STATUS_SUCCESS,
1847
                    constants.JOB_STATUS_ERROR,
1848
                    constants.JOB_STATUS_CANCELING,
1849
                    constants.JOB_STATUS_CANCELED):
1850
      break
1851

    
1852
    prev_job_info = job_info
1853

    
1854
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1855
  if not jobs:
1856
    raise errors.JobLost("Job with id %s lost" % job_id)
1857

    
1858
  status, opstatus, result = jobs[0]
1859

    
1860
  if status == constants.JOB_STATUS_SUCCESS:
1861
    return result
1862

    
1863
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1864
    raise errors.OpExecError("Job was canceled")
1865

    
1866
  has_ok = False
1867
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1868
    if status == constants.OP_STATUS_SUCCESS:
1869
      has_ok = True
1870
    elif status == constants.OP_STATUS_ERROR:
1871
      errors.MaybeRaise(msg)
1872

    
1873
      if has_ok:
1874
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1875
                                 (idx, msg))
1876

    
1877
      raise errors.OpExecError(str(msg))
1878

    
1879
  # default failure mode
1880
  raise errors.OpExecError(result)
1881

    
1882

    
1883
class JobPollCbBase:
1884
  """Base class for L{GenericPollJob} callbacks.
1885

1886
  """
1887
  def __init__(self):
1888
    """Initializes this class.
1889

1890
    """
1891

    
1892
  def WaitForJobChangeOnce(self, job_id, fields,
1893
                           prev_job_info, prev_log_serial):
1894
    """Waits for changes on a job.
1895

1896
    """
1897
    raise NotImplementedError()
1898

    
1899
  def QueryJobs(self, job_ids, fields):
1900
    """Returns the selected fields for the selected job IDs.
1901

1902
    @type job_ids: list of numbers
1903
    @param job_ids: Job IDs
1904
    @type fields: list of strings
1905
    @param fields: Fields
1906

1907
    """
1908
    raise NotImplementedError()
1909

    
1910

    
1911
class JobPollReportCbBase:
1912
  """Base class for L{GenericPollJob} reporting callbacks.
1913

1914
  """
1915
  def __init__(self):
1916
    """Initializes this class.
1917

1918
    """
1919

    
1920
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1921
    """Handles a log message.
1922

1923
    """
1924
    raise NotImplementedError()
1925

    
1926
  def ReportNotChanged(self, job_id, status):
1927
    """Called for if a job hasn't changed in a while.
1928

1929
    @type job_id: number
1930
    @param job_id: Job ID
1931
    @type status: string or None
1932
    @param status: Job status if available
1933

1934
    """
1935
    raise NotImplementedError()
1936

    
1937

    
1938
class _LuxiJobPollCb(JobPollCbBase):
1939
  def __init__(self, cl):
1940
    """Initializes this class.
1941

1942
    """
1943
    JobPollCbBase.__init__(self)
1944
    self.cl = cl
1945

    
1946
  def WaitForJobChangeOnce(self, job_id, fields,
1947
                           prev_job_info, prev_log_serial):
1948
    """Waits for changes on a job.
1949

1950
    """
1951
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1952
                                        prev_job_info, prev_log_serial)
1953

    
1954
  def QueryJobs(self, job_ids, fields):
1955
    """Returns the selected fields for the selected job IDs.
1956

1957
    """
1958
    return self.cl.QueryJobs(job_ids, fields)
1959

    
1960

    
1961
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1962
  def __init__(self, feedback_fn):
1963
    """Initializes this class.
1964

1965
    """
1966
    JobPollReportCbBase.__init__(self)
1967

    
1968
    self.feedback_fn = feedback_fn
1969

    
1970
    assert callable(feedback_fn)
1971

    
1972
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1973
    """Handles a log message.
1974

1975
    """
1976
    self.feedback_fn((timestamp, log_type, log_msg))
1977

    
1978
  def ReportNotChanged(self, job_id, status):
1979
    """Called if a job hasn't changed in a while.
1980

1981
    """
1982
    # Ignore
1983

    
1984

    
1985
class StdioJobPollReportCb(JobPollReportCbBase):
1986
  def __init__(self):
1987
    """Initializes this class.
1988

1989
    """
1990
    JobPollReportCbBase.__init__(self)
1991

    
1992
    self.notified_queued = False
1993
    self.notified_waitlock = False
1994

    
1995
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1996
    """Handles a log message.
1997

1998
    """
1999
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2000
             FormatLogMessage(log_type, log_msg))
2001

    
2002
  def ReportNotChanged(self, job_id, status):
2003
    """Called if a job hasn't changed in a while.
2004

2005
    """
2006
    if status is None:
2007
      return
2008

    
2009
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2010
      ToStderr("Job %s is waiting in queue", job_id)
2011
      self.notified_queued = True
2012

    
2013
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2014
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2015
      self.notified_waitlock = True
2016

    
2017

    
2018
def FormatLogMessage(log_type, log_msg):
2019
  """Formats a job message according to its type.
2020

2021
  """
2022
  if log_type != constants.ELOG_MESSAGE:
2023
    log_msg = str(log_msg)
2024

    
2025
  return utils.SafeEncode(log_msg)
2026

    
2027

    
2028
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2029
  """Function to poll for the result of a job.
2030

2031
  @type job_id: job identifier
2032
  @param job_id: the job to poll for results
2033
  @type cl: luxi.Client
2034
  @param cl: the luxi client to use for communicating with the master;
2035
             if None, a new client will be created
2036

2037
  """
2038
  if cl is None:
2039
    cl = GetClient()
2040

    
2041
  if reporter is None:
2042
    if feedback_fn:
2043
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2044
    else:
2045
      reporter = StdioJobPollReportCb()
2046
  elif feedback_fn:
2047
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2048

    
2049
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2050
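# Illustrative usage sketch (assumes "cl" and "job_id" from an earlier SendJob
# call); the lambda is hypothetical and only shows that feedback_fn receives
# (timestamp, log_type, log_msg) tuples via FeedbackFnJobPollReportCb:
#   results = PollJob(job_id, cl=cl, reporter=StdioJobPollReportCb())
# or, alternatively:
#   results = PollJob(job_id, cl=cl,
#                     feedback_fn=lambda (ts, kind, msg): ToStdout("%s", msg))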

    
2051

    
2052
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2053
  """Legacy function to submit an opcode.
2054

2055
  This is just a simple wrapper over the construction of the processor
2056
  instance. It should be extended to better handle feedback and
2057
  interaction functions.
2058

2059
  """
2060
  if cl is None:
2061
    cl = GetClient()
2062

    
2063
  SetGenericOpcodeOpts([op], opts)
2064

    
2065
  job_id = SendJob([op], cl=cl)
2066

    
2067
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2068
                       reporter=reporter)
2069

    
2070
  return op_results[0]
2071

    
2072

    
2073
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2074
  """Wrapper around SubmitOpCode or SendJob.
2075

2076
  This function will decide, based on the 'opts' parameter, whether to
2077
  submit and wait for the result of the opcode (and return it), or
2078
  whether to just send the job and print its identifier. It is used in
2079
  order to simplify the implementation of the '--submit' option.
2080

2081
  It will also process the opcodes if we're sending them via SendJob
2082
  (otherwise SubmitOpCode does it).
2083

2084
  """
2085
  if opts and opts.submit_only:
2086
    job = [op]
2087
    SetGenericOpcodeOpts(job, opts)
2088
    job_id = SendJob(job, cl=cl)
2089
    raise JobSubmittedException(job_id)
2090
  else:
2091
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2092
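# Illustrative pattern as used by command implementations; the opcode below is
# only a hypothetical placeholder for any opcodes.OpCode instance:
#   op = opcodes.OpSomething(...)  # hypothetical opcode and arguments
#   result = SubmitOrSend(op, opts)
# With --submit (opts.submit_only) this raises JobSubmittedException carrying
# the job ID; otherwise it waits for the job and returns the opcode result.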

    
2093

    
2094
def SetGenericOpcodeOpts(opcode_list, options):
2095
  """Processor for generic options.
2096

2097
  This function updates the given opcodes based on generic command
2098
  line options (like debug, dry-run, etc.).
2099

2100
  @param opcode_list: list of opcodes
2101
  @param options: command line options or None
2102
  @return: None (in-place modification)
2103

2104
  """
2105
  if not options:
2106
    return
2107
  for op in opcode_list:
2108
    op.debug_level = options.debug
2109
    if hasattr(options, "dry_run"):
2110
      op.dry_run = options.dry_run
2111
    if getattr(options, "priority", None) is not None:
2112
      op.priority = options.priority
2113

    
2114

    
2115
def GetClient(query=False):
2116
  """Connects to the a luxi socket and returns a client.
2117

2118
  @type query: boolean
2119
  @param query: this signifies that the client will only be
2120
      used for queries; if the build-time parameter
2121
      enable-split-queries is enabled, then the client will be
2122
      connected to the query socket instead of the masterd socket
2123

2124
  """
2125
  if query and constants.ENABLE_SPLIT_QUERY:
2126
    address = pathutils.QUERY_SOCKET
2127
  else:
2128
    address = None
2129
  # TODO: Cache object?
2130
  try:
2131
    client = luxi.Client(address=address)
2132
  except luxi.NoMasterError:
2133
    ss = ssconf.SimpleStore()
2134

    
2135
    # Try to read ssconf file
2136
    try:
2137
      ss.GetMasterNode()
2138
    except errors.ConfigurationError:
2139
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2140
                                 " not part of a cluster",
2141
                                 errors.ECODE_INVAL)
2142

    
2143
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2144
    if master != myself:
2145
      raise errors.OpPrereqError("This is not the master node, please connect"
2146
                                 " to node '%s' and rerun the command" %
2147
                                 master, errors.ECODE_INVAL)
2148
    raise
2149
  return client
2150

    
2151

    
2152
def FormatError(err):
2153
  """Return a formatted error message for a given error.
2154

2155
  This function takes an exception instance and returns a tuple
2156
  consisting of two values: first, the recommended exit code, and
2157
  second, a string describing the error message (not
2158
  newline-terminated).
2159

2160
  """
2161
  retcode = 1
2162
  obuf = StringIO()
2163
  msg = str(err)
2164
  if isinstance(err, errors.ConfigurationError):
2165
    txt = "Corrupt configuration file: %s" % msg
2166
    logging.error(txt)
2167
    obuf.write(txt + "\n")
2168
    obuf.write("Aborting.")
2169
    retcode = 2
2170
  elif isinstance(err, errors.HooksAbort):
2171
    obuf.write("Failure: hooks execution failed:\n")
2172
    for node, script, out in err.args[0]:
2173
      if out:
2174
        obuf.write("  node: %s, script: %s, output: %s\n" %
2175
                   (node, script, out))
2176
      else:
2177
        obuf.write("  node: %s, script: %s (no output)\n" %
2178
                   (node, script))
2179
  elif isinstance(err, errors.HooksFailure):
2180
    obuf.write("Failure: hooks general failure: %s" % msg)
2181
  elif isinstance(err, errors.ResolverError):
2182
    this_host = netutils.Hostname.GetSysName()
2183
    if err.args[0] == this_host:
2184
      msg = "Failure: can't resolve my own hostname ('%s')"
2185
    else:
2186
      msg = "Failure: can't resolve hostname '%s'"
2187
    obuf.write(msg % err.args[0])
2188
  elif isinstance(err, errors.OpPrereqError):
2189
    if len(err.args) == 2:
2190
      obuf.write("Failure: prerequisites not met for this"
2191
                 " operation:\nerror type: %s, error details:\n%s" %
2192
                 (err.args[1], err.args[0]))
2193
    else:
2194
      obuf.write("Failure: prerequisites not met for this"
2195
                 " operation:\n%s" % msg)
2196
  elif isinstance(err, errors.OpExecError):
2197
    obuf.write("Failure: command execution error:\n%s" % msg)
2198
  elif isinstance(err, errors.TagError):
2199
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2200
  elif isinstance(err, errors.JobQueueDrainError):
2201
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2202
               " accept new requests\n")
2203
  elif isinstance(err, errors.JobQueueFull):
2204
    obuf.write("Failure: the job queue is full and doesn't accept new"
2205
               " job submissions until old jobs are archived\n")
2206
  elif isinstance(err, errors.TypeEnforcementError):
2207
    obuf.write("Parameter Error: %s" % msg)
2208
  elif isinstance(err, errors.ParameterError):
2209
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2210
  elif isinstance(err, luxi.NoMasterError):
2211
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2212
               " and listening for connections?")
2213
  elif isinstance(err, luxi.TimeoutError):
2214
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2215
               " been submitted and will continue to run even if the call"
2216
               " timed out. Useful commands in this situation are \"gnt-job"
2217
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2218
    obuf.write(msg)
2219
  elif isinstance(err, luxi.PermissionError):
2220
    obuf.write("It seems you don't have permissions to connect to the"
2221
               " master daemon.\nPlease retry as a different user.")
2222
  elif isinstance(err, luxi.ProtocolError):
2223
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2224
               "%s" % msg)
2225
  elif isinstance(err, errors.JobLost):
2226
    obuf.write("Error checking job status: %s" % msg)
2227
  elif isinstance(err, errors.QueryFilterParseError):
2228
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2229
    obuf.write("\n".join(err.GetDetails()))
2230
  elif isinstance(err, errors.GenericError):
2231
    obuf.write("Unhandled Ganeti error: %s" % msg)
2232
  elif isinstance(err, JobSubmittedException):
2233
    obuf.write("JobID: %s\n" % err.args[0])
2234
    retcode = 0
2235
  else:
2236
    obuf.write("Unhandled exception: %s" % msg)
2237
  return retcode, obuf.getvalue().rstrip("\n")
2238
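# Illustrative use of FormatError in a client-side error handler (sketch only):
#   try:
#     ...
#   except errors.GenericError, err:
#     (exit_code, message) = FormatError(err)
#     ToStderr(message)
#     sys.exit(exit_code)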

    
2239

    
2240
def GenericMain(commands, override=None, aliases=None,
2241
                env_override=frozenset()):
2242
  """Generic main function for all the gnt-* commands.
2243

2244
  @param commands: a dictionary with a special structure, see the design doc
2245
                   for command line handling.
2246
  @param override: if not None, we expect a dictionary with keys that will
2247
                   override command line options; this can be used to pass
2248
                   options from the scripts to generic functions
2249
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2250
  @param env_override: list of environment names which are allowed to submit
2251
                       default args for commands
2252

2253
  """
2254
  # save the program name and the entire command line for later logging
2255
  if sys.argv:
2256
    binary = os.path.basename(sys.argv[0])
2257
    if not binary:
2258
      binary = sys.argv[0]
2259

    
2260
    if len(sys.argv) >= 2:
2261
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2262
    else:
2263
      logname = binary
2264

    
2265
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2266
  else:
2267
    binary = "<unknown program>"
2268
    cmdline = "<unknown>"
2269

    
2270
  if aliases is None:
2271
    aliases = {}
2272

    
2273
  try:
2274
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2275
                                       env_override)
2276
  except _ShowVersion:
2277
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2278
             constants.RELEASE_VERSION)
2279
    return constants.EXIT_SUCCESS
2280
  except _ShowUsage, err:
2281
    for line in _FormatUsage(binary, commands):
2282
      ToStdout(line)
2283

    
2284
    if err.exit_error:
2285
      return constants.EXIT_FAILURE
2286
    else:
2287
      return constants.EXIT_SUCCESS
2288
  except errors.ParameterError, err:
2289
    result, err_msg = FormatError(err)
2290
    ToStderr(err_msg)
2291
    return 1
2292

    
2293
  if func is None: # parse error
2294
    return 1
2295

    
2296
  if override is not None:
2297
    for key, val in override.iteritems():
2298
      setattr(options, key, val)
2299

    
2300
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2301
                     stderr_logging=True)
2302

    
2303
  logging.info("Command line: %s", cmdline)
2304

    
2305
  try:
2306
    result = func(options, args)
2307
  except (errors.GenericError, luxi.ProtocolError,
2308
          JobSubmittedException), err:
2309
    result, err_msg = FormatError(err)
2310
    logging.exception("Error during command processing")
2311
    ToStderr(err_msg)
2312
  except KeyboardInterrupt:
2313
    result = constants.EXIT_FAILURE
2314
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2315
             " might have been submitted and"
2316
             " will continue to run in the background.")
2317
  except IOError, err:
2318
    if err.errno == errno.EPIPE:
2319
      # our terminal went away, we'll exit
2320
      sys.exit(constants.EXIT_FAILURE)
2321
    else:
2322
      raise
2323

    
2324
  return result
2325

    
2326

    
2327
def ParseNicOption(optvalue):
2328
  """Parses the value of the --net option(s).
2329

2330
  """
2331
  try:
2332
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2333
  except (TypeError, ValueError), err:
2334
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2335
                               errors.ECODE_INVAL)
2336

    
2337
  nics = [{}] * nic_max
2338
  for nidx, ndict in optvalue:
2339
    nidx = int(nidx)
2340

    
2341
    if not isinstance(ndict, dict):
2342
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2343
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2344

    
2345
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2346

    
2347
    nics[nidx] = ndict
2348

    
2349
  return nics
2350
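# Illustrative behaviour (NIC parameters are hypothetical, but keys must be
# valid constants.INIC_PARAMS_TYPES names); the input follows the
# (index, {param: value}) shape produced by repeated --net options:
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#     -> [{"link": "br0"}, {}, {"mode": "routed"}]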

    
2351

    
2352
def GenericInstanceCreate(mode, opts, args):
2353
  """Add an instance to the cluster via either creation or import.
2354

2355
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2356
  @param opts: the command line options selected by the user
2357
  @type args: list
2358
  @param args: should contain only one element, the new instance name
2359
  @rtype: int
2360
  @return: the desired exit code
2361

2362
  """
2363
  instance = args[0]
2364

    
2365
  (pnode, snode) = SplitNodeOption(opts.node)
2366

    
2367
  hypervisor = None
2368
  hvparams = {}
2369
  if opts.hypervisor:
2370
    hypervisor, hvparams = opts.hypervisor
2371

    
2372
  if opts.nics:
2373
    nics = ParseNicOption(opts.nics)
2374
  elif opts.no_nics:
2375
    # no nics
2376
    nics = []
2377
  elif mode == constants.INSTANCE_CREATE:
2378
    # default of one nic, all auto
2379
    nics = [{}]
2380
  else:
2381
    # mode == import
2382
    nics = []
2383

    
2384
  if opts.disk_template == constants.DT_DISKLESS:
2385
    if opts.disks or opts.sd_size is not None:
2386
      raise errors.OpPrereqError("Diskless instance but disk"
2387
                                 " information passed", errors.ECODE_INVAL)
2388
    disks = []
2389
  else:
2390
    if (not opts.disks and not opts.sd_size
2391
        and mode == constants.INSTANCE_CREATE):
2392
      raise errors.OpPrereqError("No disk information specified",
2393
                                 errors.ECODE_INVAL)
2394
    if opts.disks and opts.sd_size is not None:
2395
      raise errors.OpPrereqError("Please use either the '--disk' or"
2396
                                 " '-s' option", errors.ECODE_INVAL)
2397
    if opts.sd_size is not None:
2398
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2399

    
2400
    if opts.disks:
2401
      try:
2402
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2403
      except ValueError, err:
2404
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2405
                                   errors.ECODE_INVAL)
2406
      disks = [{}] * disk_max
2407
    else:
2408
      disks = []
2409
    for didx, ddict in opts.disks:
2410
      didx = int(didx)
2411
      if not isinstance(ddict, dict):
2412
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2413
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2414
      elif constants.IDISK_SIZE in ddict:
2415
        if constants.IDISK_ADOPT in ddict:
2416
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2417
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2418
        try:
2419
          ddict[constants.IDISK_SIZE] = \
2420
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2421
        except ValueError, err:
2422
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2423
                                     (didx, err), errors.ECODE_INVAL)
2424
      elif constants.IDISK_ADOPT in ddict:
2425
        if mode == constants.INSTANCE_IMPORT:
2426
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2427
                                     " import", errors.ECODE_INVAL)
2428
        ddict[constants.IDISK_SIZE] = 0
2429
      else:
2430
        raise errors.OpPrereqError("Missing size or adoption source for"
2431
                                   " disk %d" % didx, errors.ECODE_INVAL)
2432
      disks[didx] = ddict
2433

    
2434
  if opts.tags is not None:
2435
    tags = opts.tags.split(",")
2436
  else:
2437
    tags = []
2438

    
2439
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2440
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2441

    
2442
  if mode == constants.INSTANCE_CREATE:
2443
    start = opts.start
2444
    os_type = opts.os
2445
    force_variant = opts.force_variant
2446
    src_node = None
2447
    src_path = None
2448
    no_install = opts.no_install
2449
    identify_defaults = False
2450
  elif mode == constants.INSTANCE_IMPORT:
2451
    start = False
2452
    os_type = None
2453
    force_variant = False
2454
    src_node = opts.src_node
2455
    src_path = opts.src_dir
2456
    no_install = None
2457
    identify_defaults = opts.identify_defaults
2458
  else:
2459
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2460

    
2461
  op = opcodes.OpInstanceCreate(instance_name=instance,
2462
                                disks=disks,
2463
                                disk_template=opts.disk_template,
2464
                                nics=nics,
2465
                                pnode=pnode, snode=snode,
2466
                                ip_check=opts.ip_check,
2467
                                name_check=opts.name_check,
2468
                                wait_for_sync=opts.wait_for_sync,
2469
                                file_storage_dir=opts.file_storage_dir,
2470
                                file_driver=opts.file_driver,
2471
                                iallocator=opts.iallocator,
2472
                                hypervisor=hypervisor,
2473
                                hvparams=hvparams,
2474
                                beparams=opts.beparams,
2475
                                osparams=opts.osparams,
2476
                                mode=mode,
2477
                                start=start,
2478
                                os_type=os_type,
2479
                                force_variant=force_variant,
2480
                                src_node=src_node,
2481
                                src_path=src_path,
2482
                                tags=tags,
2483
                                no_install=no_install,
2484
                                identify_defaults=identify_defaults,
2485
                                ignore_ipolicy=opts.ignore_ipolicy)
2486

    
2487
  SubmitOrSend(op, opts)
2488
  return 0
2489
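# Illustrative shape of the disk options consumed above (values are
# hypothetical; "size" and "adopt" stand for constants.IDISK_SIZE and
# constants.IDISK_ADOPT):
#   opts.disks = [("0", {"size": "10g"}), ("1", {"adopt": "data_lv"})]
# is parsed into
#   disks = [{"size": 10240}, {"adopt": "data_lv", "size": 0}]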

    
2490

    
2491
class _RunWhileClusterStoppedHelper:
2492
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2493

2494
  """
2495
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2496
    """Initializes this class.
2497

2498
    @type feedback_fn: callable
2499
    @param feedback_fn: Feedback function
2500
    @type cluster_name: string
2501
    @param cluster_name: Cluster name
2502
    @type master_node: string
2503
    @param master_node: Master node name
2504
    @type online_nodes: list
2505
    @param online_nodes: List of names of online nodes
2506

2507
    """
2508
    self.feedback_fn = feedback_fn
2509
    self.cluster_name = cluster_name
2510
    self.master_node = master_node
2511
    self.online_nodes = online_nodes
2512

    
2513
    self.ssh = ssh.SshRunner(self.cluster_name)
2514

    
2515
    self.nonmaster_nodes = [name for name in online_nodes
2516
                            if name != master_node]
2517

    
2518
    assert self.master_node not in self.nonmaster_nodes
2519

    
2520
  def _RunCmd(self, node_name, cmd):
2521
    """Runs a command on the local or a remote machine.
2522

2523
    @type node_name: string
2524
    @param node_name: Machine name
2525
    @type cmd: list
2526
    @param cmd: Command
2527

2528
    """
2529
    if node_name is None or node_name == self.master_node:
2530
      # No need to use SSH
2531
      result = utils.RunCmd(cmd)
2532
    else:
2533
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2534
                            utils.ShellQuoteArgs(cmd))
2535

    
2536
    if result.failed:
2537
      errmsg = ["Failed to run command %s" % result.cmd]
2538
      if node_name:
2539
        errmsg.append("on node %s" % node_name)
2540
      errmsg.append(": exitcode %s and error %s" %
2541
                    (result.exit_code, result.output))
2542
      raise errors.OpExecError(" ".join(errmsg))
2543

    
2544
  def Call(self, fn, *args):
2545
    """Call function while all daemons are stopped.
2546

2547
    @type fn: callable
2548
    @param fn: Function to be called
2549

2550
    """
2551
    # Pause watcher by acquiring an exclusive lock on watcher state file
2552
    self.feedback_fn("Blocking watcher")
2553
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2554
    try:
2555
      # TODO: Currently, this just blocks. There's no timeout.
2556
      # TODO: Should it be a shared lock?
2557
      watcher_block.Exclusive(blocking=True)
2558

    
2559
      # Stop master daemons, so that no new jobs can come in and all running
2560
      # ones are finished
2561
      self.feedback_fn("Stopping master daemons")
2562
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2563
      try:
2564
        # Stop daemons on all nodes
2565
        for node_name in self.online_nodes:
2566
          self.feedback_fn("Stopping daemons on %s" % node_name)
2567
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2568

    
2569
        # All daemons are shut down now
2570
        try:
2571
          return fn(self, *args)
2572
        except Exception, err:
2573
          _, errmsg = FormatError(err)
2574
          logging.exception("Caught exception")
2575
          self.feedback_fn(errmsg)
2576
          raise
2577
      finally:
2578
        # Start cluster again, master node last
2579
        for node_name in self.nonmaster_nodes + [self.master_node]:
2580
          self.feedback_fn("Starting daemons on %s" % node_name)
2581
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2582
    finally:
2583
      # Resume watcher
2584
      watcher_block.Close()
2585

    
2586

    
2587
def RunWhileClusterStopped(feedback_fn, fn, *args):
2588
  """Calls a function while all cluster daemons are stopped.
2589

2590
  @type feedback_fn: callable
2591
  @param feedback_fn: Feedback function
2592
  @type fn: callable
2593
  @param fn: Function to be called when daemons are stopped
2594

2595
  """
2596
  feedback_fn("Gathering cluster information")
2597

    
2598
  # This ensures we're running on the master daemon
2599
  cl = GetClient()
2600

    
2601
  (cluster_name, master_node) = \
2602
    cl.QueryConfigValues(["cluster_name", "master_node"])
2603

    
2604
  online_nodes = GetOnlineNodes([], cl=cl)
2605

    
2606
  # Don't keep a reference to the client. The master daemon will go away.
2607
  del cl
2608

    
2609
  assert master_node in online_nodes
2610

    
2611
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2612
                                       online_nodes).Call(fn, *args)
2613
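# Illustrative usage sketch; "_ClusterRename", "old_name" and "new_name" are
# hypothetical, the only fixed part is that the callable receives the helper
# instance as its first argument (see _RunWhileClusterStoppedHelper.Call):
#   RunWhileClusterStopped(ToStdout, _ClusterRename, old_name, new_name)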

    
2614

    
2615
def GenerateTable(headers, fields, separator, data,
2616
                  numfields=None, unitfields=None,
2617
                  units=None):
2618
  """Prints a table with headers and different fields.
2619

2620
  @type headers: dict
2621
  @param headers: dictionary mapping field names to headers for
2622
      the table
2623
  @type fields: list
2624
  @param fields: the field names corresponding to each row in
2625
      the data field
2626
  @param separator: the separator to be used; if this is None,
2627
      the default 'smart' algorithm is used which computes optimal
2628
      field width, otherwise just the separator is used between
2629
      each field
2630
  @type data: list
2631
  @param data: a list of lists, each sublist being one row to be output
2632
  @type numfields: list
2633
  @param numfields: a list with the fields that hold numeric
2634
      values and thus should be right-aligned
2635
  @type unitfields: list
2636
  @param unitfields: a list with the fields that hold numeric
2637
      values that should be formatted with the units field
2638
  @type units: string or None
2639
  @param units: the units we should use for formatting, or None for
2640
      automatic choice (human-readable for non-separator usage, otherwise
2641
      megabytes); this is a one-letter string
2642

2643
  """
2644
  if units is None:
2645
    if separator:
2646
      units = "m"
2647
    else:
2648
      units = "h"
2649

    
2650
  if numfields is None:
2651
    numfields = []
2652
  if unitfields is None:
2653
    unitfields = []
2654

    
2655
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2656
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2657

    
2658
  format_fields = []
2659
  for field in fields:
2660
    if headers and field not in headers:
2661
      # TODO: handle unknown fields better (either revert to old
2662
      # style of raising exception, or deal more intelligently with
2663
      # variable fields)
2664
      headers[field] = field
2665
    if separator is not None:
2666
      format_fields.append("%s")
2667
    elif numfields.Matches(field):
2668
      format_fields.append("%*s")
2669
    else:
2670
      format_fields.append("%-*s")
2671

    
2672
  if separator is None:
2673
    mlens = [0 for name in fields]
2674
    format_str = " ".join(format_fields)
2675
  else:
2676
    format_str = separator.replace("%", "%%").join(format_fields)
2677

    
2678
  for row in data:
2679
    if row is None:
2680
      continue
2681
    for idx, val in enumerate(row):
2682
      if unitfields.Matches(fields[idx]):
2683
        try:
2684
          val = int(val)
2685
        except (TypeError, ValueError):
2686
          pass
2687
        else:
2688
          val = row[idx] = utils.FormatUnit(val, units)
2689
      val = row[idx] = str(val)
2690
      if separator is None:
2691
        mlens[idx] = max(mlens[idx], len(val))
2692

    
2693
  result = []
2694
  if headers:
2695
    args = []
2696
    for idx, name in enumerate(fields):
2697
      hdr = headers[name]
2698
      if separator is None:
2699
        mlens[idx] = max(mlens[idx], len(hdr))
2700
        args.append(mlens[idx])
2701
      args.append(hdr)
2702
    result.append(format_str % tuple(args))
2703

    
2704
  if separator is None:
2705
    assert len(mlens) == len(fields)
2706

    
2707
    if fields and not numfields.Matches(fields[-1]):
2708
      mlens[-1] = 0
2709

    
2710
  for line in data:
2711
    args = []
2712
    if line is None:
2713
      line = ["-" for _ in fields]
2714
    for idx in range(len(fields)):
2715
      if separator is None:
2716
        args.append(mlens[idx])
2717
      args.append(line[idx])
2718
    result.append(format_str % tuple(args))
2719

    
2720
  return result
2721
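# Illustrative call (headers, fields and data are hypothetical); the result is
# a list of pre-formatted lines, one for the header row and one per data row:
#   GenerateTable(headers={"name": "Node", "dfree": "DFree"},
#                 fields=["name", "dfree"],
#                 separator=None,
#                 data=[["node1.example.com", 102400]],
#                 unitfields=["dfree"])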

    
2722

    
2723
def _FormatBool(value):
2724
  """Formats a boolean value as a string.
2725

2726
  """
2727
  if value:
2728
    return "Y"
2729
  return "N"
2730

    
2731

    
2732
#: Default formatting for query results; (callback, align right)
2733
_DEFAULT_FORMAT_QUERY = {
2734
  constants.QFT_TEXT: (str, False),
2735
  constants.QFT_BOOL: (_FormatBool, False),
2736
  constants.QFT_NUMBER: (str, True),
2737
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2738
  constants.QFT_OTHER: (str, False),
2739
  constants.QFT_UNKNOWN: (str, False),
2740
  }
2741

    
2742

    
2743
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
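
# Illustrative sketch (added in this edit, not part of the original module):
# FormatQueryResult is normally driven by GenericList below, but it can also
# be applied directly to a query response; the field names used here are
# only examples.
#
#   cl = GetClient()
#   response = cl.Query(constants.QR_NODE, ["name", "pinst_cnt"], None)
#   (status, lines) = FormatQueryResult(response, unit="h", header=True)
#   for line in lines:
#     ToStdout(line)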


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
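
# Illustrative usage sketch (not part of the original module): this is the
# shape of call a client command such as "gnt-node list" would typically
# make; the option names are only examples.
#
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
#                      opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose, namefield="name")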


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
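
# Illustrative usage sketch (not part of the original module): without a
# separator, columns are padded to the widest value seen, e.g.
#
#   cols = [TableColumn("Name", str, False), TableColumn("Size", str, True)]
#   FormatTable([["disk0", "128"], ["disk1", "1024"]], cols, True, None)
#   # -> ["Name  Size", "disk0  128", "disk1 1024"]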


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
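
# Note (added in this edit): malformed input is reported as "?" rather than
# raising, e.g. FormatTimestamp("not-a-timestamp") == "?", while a valid
# (seconds, microseconds) pair is delegated to utils.FormatTime.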


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
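
# For reference (comment added in this edit), a few values accepted by
# ParseTimespec and their results in seconds:
#   ParseTimespec("30") == 30
#   ParseTimespec("2h") == 7200
#   ParseTimespec("1w") == 604800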


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
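
# Illustrative usage sketch (not part of the original module); the opcode
# used here is only an example:
#
#   je = JobExecutor(cl=GetClient(), opts=opts)
#   for name in instance_names:
#     je.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   for (success, result) in je.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)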


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the explicitly-configured parameters (without defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns a dictionary of values that may carry a unit suffix.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
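
# Note (added in this edit): values other than constants.VALUE_DEFAULT are
# passed through utils.ParseUnit, so suffixed sizes such as "4G" are
# presumably normalized to mebibytes, e.g.
#   _MaybeParseUnit({"min": "default", "max": "4G"})
#   # -> {"min": "default", "max": 4096}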


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ipolicy_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES

  for specs in ipolicy_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ipolicy_out = objects.MakeEmptyIPolicy()
  for name, specs in ipolicy_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      ipolicy_out[key][name] = val

  # no filldict for non-dicts
  if not group_ipolicy and fill_all:
    if ipolicy_disk_templates is None:
      ipolicy_disk_templates = constants.DISK_TEMPLATES
    if ipolicy_vcpu_ratio is None:
      ipolicy_vcpu_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
    if ipolicy_spindle_ratio is None:
      ipolicy_spindle_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  return ipolicy_out
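
# Illustrative call sketch (not part of the original module); the concrete
# values are only examples and, as in the code above, each ispec is a
# {min: .., max: .., std: ..} dictionary:
#
#   ipolicy = CreateIPolicyFromOpts(
#     ispecs_mem_size={"min": "128", "max": "16384", "std": "512"},
#     ispecs_cpu_count={"min": 1, "max": 8, "std": 1},
#     ispecs_disk_count={"min": 1, "max": 16, "std": 1},
#     ispecs_disk_size={"min": "1G", "max": "1T", "std": "10G"},
#     ispecs_nic_count={"min": 1, "max": 8, "std": 1},
#     ipolicy_vcpu_ratio=4.0,
#     fill_all=True)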