#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
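
# Illustrative sketch (not executed), assuming utils.ParseUnit's MiB-based
# semantics where an unsuffixed value is taken as MiB and suffixed values are
# converted:
#   check_unit(option_obj, "--os-size", "512") -> 512
#   check_unit(option_obj, "--os-size", "4g")  -> 4096
# A malformed value raises OptionValueError instead of UnitParseError, so
# optparse can report it as a normal option error.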


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
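
# Illustrative example (not executed) of the conversion done above; the keys
# are arbitrary:
#   _SplitKeyVal("-B", "memory=128,no_auto_balance,-vcpus")
#     -> {"memory": "128", "auto_balance": False, "vcpus": None}
# A plain "key" entry (no "=", no prefix) maps to True, and a duplicate key
# raises errors.ParameterError.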


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
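
# Illustrative examples (not executed) of the parsing done above; the idents
# and keys are arbitrary:
#   check_ident_key_val(None, "--net", "0:ip=192.0.2.10,mode=bridged")
#     -> ("0", {"ip": "192.0.2.10", "mode": "bridged"})
#   check_ident_key_val(None, "--disk", "no_remote")
#     -> ("remote", False)
# i.e. a "no_"-prefixed ident without options requests removal of the whole
# parameter group.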


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during"
                         " the migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
1396
                                     " disk templates, e.g. %s)" %
1397
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1398

    
1399
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1400
                                action="store_true", default=False,
1401
                                help="Pause instance at startup")
1402

    
1403
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1404
                          help="Destination node group (name or uuid)",
1405
                          default=None, action="append",
1406
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1407

    
1408
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1409
                               action="append", dest="ignore_errors",
1410
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1411
                               help="Error code to be ignored")
1412

    
1413
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1414
                            action="append",
1415
                            help=("Specify disk state information in the"
1416
                                  " format"
1417
                                  " storage_type/identifier:option=value,...;"
1418
                                  " note this is unused for now"),
1419
                            type="identkeyval")
1420

    
1421
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1422
                          action="append",
1423
                          help=("Specify hypervisor state information in the"
1424
                                " format hypervisor:option=value,...;"
1425
                                " note this is unused for now"),
1426
                          type="identkeyval")
1427

    
1428
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1429
                                action="store_true", default=False,
1430
                                help="Ignore instance policy violations")
1431

    
1432
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1433
                             help="Sets the instance's runtime memory,"
1434
                             " ballooning it up or down to the new value",
1435
                             default=None, type="unit", metavar="<size>")
1436

    
1437
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1438
                          action="store_true", default=False,
1439
                          help="Marks the grow as absolute instead of the"
1440
                          " (default) relative mode")
1441

    
1442
#: Options provided by all commands
1443
COMMON_OPTS = [DEBUG_OPT]
1444

    
1445
# common options for creating instances. add and import then add their own
1446
# specific ones.
1447
COMMON_CREATE_OPTS = [
1448
  BACKEND_OPT,
1449
  DISK_OPT,
1450
  DISK_TEMPLATE_OPT,
1451
  FILESTORE_DIR_OPT,
1452
  FILESTORE_DRIVER_OPT,
1453
  HYPERVISOR_OPT,
1454
  IALLOCATOR_OPT,
1455
  NET_OPT,
1456
  NODE_PLACEMENT_OPT,
1457
  NOIPCHECK_OPT,
1458
  NONAMECHECK_OPT,
1459
  NONICS_OPT,
1460
  NWSYNC_OPT,
1461
  OSPARAMS_OPT,
1462
  OS_SIZE_OPT,
1463
  SUBMIT_OPT,
1464
  TAG_ADD_OPT,
1465
  DRY_RUN_OPT,
1466
  PRIORITY_OPT,
1467
  ]
1468

    
1469
# common instance policy options
1470
INSTANCE_POLICY_OPTS = [
1471
  SPECS_CPU_COUNT_OPT,
1472
  SPECS_DISK_COUNT_OPT,
1473
  SPECS_DISK_SIZE_OPT,
1474
  SPECS_MEM_SIZE_OPT,
1475
  SPECS_NIC_COUNT_OPT,
1476
  IPOLICY_DISK_TEMPLATES,
1477
  IPOLICY_VCPU_RATIO,
1478
  IPOLICY_SPINDLE_RATIO,
1479
  ]
1480

    
1481

    
1482
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of env variables allowed for default args

  """
  assert not (env_override - set(commands))

  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
1571
  """Verifies the arguments using the argument definition.
1572

1573
  Algorithm:
1574

1575
    1. Abort with error if values specified by user but none expected.
1576

1577
    1. For each argument in definition
1578

1579
      1. Keep running count of minimum number of values (min_count)
1580
      1. Keep running count of maximum number of values (max_count)
1581
      1. If it has an unlimited number of values
1582

1583
        1. Abort with error if it's not the last argument in the definition
1584

1585
    1. If last argument has limited number of values
1586

1587
      1. Abort with error if number of values doesn't match or is too large
1588

1589
    1. Abort with error if user didn't pass enough values (min_count)
1590

1591
  """
1592
  if args and not args_def:
1593
    ToStderr("Error: Command %s expects no arguments", cmd)
1594
    return False
1595

    
1596
  min_count = None
1597
  max_count = None
1598
  check_max = None
1599

    
1600
  last_idx = len(args_def) - 1
1601

    
1602
  for idx, arg in enumerate(args_def):
1603
    if min_count is None:
1604
      min_count = arg.min
1605
    elif arg.min is not None:
1606
      min_count += arg.min
1607

    
1608
    if max_count is None:
1609
      max_count = arg.max
1610
    elif arg.max is not None:
1611
      max_count += arg.max
1612

    
1613
    if idx == last_idx:
1614
      check_max = (arg.max is not None)
1615

    
1616
    elif arg.max is None:
1617
      raise errors.ProgrammerError("Only the last argument can have max=None")
1618

    
1619
  if check_max:
1620
    # Command with exact number of arguments
1621
    if (min_count is not None and max_count is not None and
1622
        min_count == max_count and len(args) != min_count):
1623
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1624
      return False
1625

    
1626
    # Command with limited number of arguments
1627
    if max_count is not None and len(args) > max_count:
1628
      ToStderr("Error: Command %s expects only %d argument(s)",
1629
               cmd, max_count)
1630
      return False
1631

    
1632
  # Command with some required arguments
1633
  if min_count is not None and len(args) < min_count:
1634
    ToStderr("Error: Command %s expects at least %d argument(s)",
1635
             cmd, min_count)
1636
    return False
1637

    
1638
  return True
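# Illustrative sketch of the counting logic above (the ArgSpec namedtuple is a
# hypothetical stand-in for the argument definition objects used by the real
# command tables; only the "min"/"max" attributes matter here):
#
#   import collections
#   ArgSpec = collections.namedtuple("ArgSpec", ["min", "max"])
#   args_def = [ArgSpec(min=1, max=1), ArgSpec(min=0, max=None)]
#   _CheckArguments("mycmd", args_def, ["inst1"])            # -> True
#   _CheckArguments("mycmd", args_def, ["inst1", "a", "b"])  # -> True (open-ended tail)
#   _CheckArguments("mycmd", args_def, [])                   # -> False (at least 1 needed)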


def SplitNodeOption(value):
1642
  """Splits the value of a --node option.
1643

1644
  """
1645
  if value and ":" in value:
1646
    return value.split(":", 1)
1647
  else:
1648
    return (value, None)
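# Example (illustrative only; the hostnames are made up): a value of the form
# "pnode:snode" is split into its two components, while a plain name yields a
# (name, None) pair:
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#     # -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     # -> ("node1.example.com", None)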


def CalculateOSNames(os_name, os_variants):
1652
  """Calculates all the names an OS can be called, according to its variants.
1653

1654
  @type os_name: string
1655
  @param os_name: base name of the os
1656
  @type os_variants: list or None
1657
  @param os_variants: list of supported variants
1658
  @rtype: list
1659
  @return: list of valid names
1660

1661
  """
1662
  if os_variants:
1663
    return ["%s+%s" % (os_name, v) for v in os_variants]
1664
  else:
1665
    return [os_name]
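# Example (illustrative only; the OS name and variants are made up):
#
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#     # -> ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None)
#     # -> ["debootstrap"]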


def ParseFields(selected, default):
1669
  """Parses the values of "--field"-like options.
1670

1671
  @type selected: string or None
1672
  @param selected: User-selected options
1673
  @type default: list
1674
  @param default: Default fields
1675

1676
  """
1677
  if selected is None:
1678
    return default
1679

    
1680
  if selected.startswith("+"):
1681
    return default + selected[1:].split(",")
1682

    
1683
  return selected.split(",")
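# Example (illustrative only; the field names are made up): a leading "+"
# extends the default field list instead of replacing it:
#
#   ParseFields(None, ["name", "status"])         # -> ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])  # -> ["name", "status", "oper_ram"]
#   ParseFields("name,ip", ["name", "status"])    # -> ["name", "ip"]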


UsesRPC = rpc.RunWithRPC
1687

    
1688

    
1689
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
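# Typical usage sketch (illustrative only; "instance_name" is a hypothetical
# variable): with the default choices the call returns True for "y", False for
# "n", and the last entry's value (False) when no tty is available:
#
#   if not AskUser("Remove instance %s?" % instance_name):
#     return 1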


class JobSubmittedException(Exception):
1747
  """Job was submitted, client should exit.
1748

1749
  This exception has one argument, the ID of the job that was
1750
  submitted. The handler should print this ID.
1751

1752
  This is not an error, just a structured way to exit from clients.
1753

1754
  """
1755

    
1756

    
1757
def SendJob(ops, cl=None):
1758
  """Function to submit an opcode without waiting for the results.
1759

1760
  @type ops: list
1761
  @param ops: list of opcodes
1762
  @type cl: luxi.Client
1763
  @param cl: the luxi client to use for communicating with the master;
1764
             if None, a new client will be created
1765

1766
  """
1767
  if cl is None:
1768
    cl = GetClient()
1769

    
1770
  job_id = cl.SubmitJob(ops)
1771

    
1772
  return job_id
1773

    
1774

    
1775
def GenericPollJob(job_id, cbs, report_cbs):
1776
  """Generic job-polling function.
1777

1778
  @type job_id: number
1779
  @param job_id: Job ID
1780
  @type cbs: Instance of L{JobPollCbBase}
1781
  @param cbs: Data callbacks
1782
  @type report_cbs: Instance of L{JobPollReportCbBase}
1783
  @param report_cbs: Reporting callbacks
1784

1785
  """
1786
  prev_job_info = None
1787
  prev_logmsg_serial = None
1788

    
1789
  status = None
1790

    
1791
  while True:
1792
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1793
                                      prev_logmsg_serial)
1794
    if not result:
1795
      # job not found, go away!
1796
      raise errors.JobLost("Job with id %s lost" % job_id)
1797

    
1798
    if result == constants.JOB_NOTCHANGED:
1799
      report_cbs.ReportNotChanged(job_id, status)
1800

    
1801
      # Wait again
1802
      continue
1803

    
1804
    # Split result, a tuple of (field values, log entries)
1805
    (job_info, log_entries) = result
1806
    (status, ) = job_info
1807

    
1808
    if log_entries:
1809
      for log_entry in log_entries:
1810
        (serial, timestamp, log_type, message) = log_entry
1811
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1812
                                    log_type, message)
1813
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1814

    
1815
    # TODO: Handle canceled and archived jobs
1816
    elif status in (constants.JOB_STATUS_SUCCESS,
1817
                    constants.JOB_STATUS_ERROR,
1818
                    constants.JOB_STATUS_CANCELING,
1819
                    constants.JOB_STATUS_CANCELED):
1820
      break
1821

    
1822
    prev_job_info = job_info
1823

    
1824
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1825
  if not jobs:
1826
    raise errors.JobLost("Job with id %s lost" % job_id)
1827

    
1828
  status, opstatus, result = jobs[0]
1829

    
1830
  if status == constants.JOB_STATUS_SUCCESS:
1831
    return result
1832

    
1833
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1834
    raise errors.OpExecError("Job was canceled")
1835

    
1836
  has_ok = False
1837
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1838
    if status == constants.OP_STATUS_SUCCESS:
1839
      has_ok = True
1840
    elif status == constants.OP_STATUS_ERROR:
1841
      errors.MaybeRaise(msg)
1842

    
1843
      if has_ok:
1844
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1845
                                 (idx, msg))
1846

    
1847
      raise errors.OpExecError(str(msg))
1848

    
1849
  # default failure mode
1850
  raise errors.OpExecError(result)
1851

    
1852

    
1853
class JobPollCbBase:
1854
  """Base class for L{GenericPollJob} callbacks.
1855

1856
  """
1857
  def __init__(self):
1858
    """Initializes this class.
1859

1860
    """
1861

    
1862
  def WaitForJobChangeOnce(self, job_id, fields,
1863
                           prev_job_info, prev_log_serial):
1864
    """Waits for changes on a job.
1865

1866
    """
1867
    raise NotImplementedError()
1868

    
1869
  def QueryJobs(self, job_ids, fields):
1870
    """Returns the selected fields for the selected job IDs.
1871

1872
    @type job_ids: list of numbers
1873
    @param job_ids: Job IDs
1874
    @type fields: list of strings
1875
    @param fields: Fields
1876

1877
    """
1878
    raise NotImplementedError()
1879

    
1880

    
1881
class JobPollReportCbBase:
1882
  """Base class for L{GenericPollJob} reporting callbacks.
1883

1884
  """
1885
  def __init__(self):
1886
    """Initializes this class.
1887

1888
    """
1889

    
1890
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1891
    """Handles a log message.
1892

1893
    """
1894
    raise NotImplementedError()
1895

    
1896
  def ReportNotChanged(self, job_id, status):
1897
    """Called for if a job hasn't changed in a while.
1898

1899
    @type job_id: number
1900
    @param job_id: Job ID
1901
    @type status: string or None
1902
    @param status: Job status if available
1903

1904
    """
1905
    raise NotImplementedError()
1906

    
1907

    
1908
class _LuxiJobPollCb(JobPollCbBase):
1909
  def __init__(self, cl):
1910
    """Initializes this class.
1911

1912
    """
1913
    JobPollCbBase.__init__(self)
1914
    self.cl = cl
1915

    
1916
  def WaitForJobChangeOnce(self, job_id, fields,
1917
                           prev_job_info, prev_log_serial):
1918
    """Waits for changes on a job.
1919

1920
    """
1921
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1922
                                        prev_job_info, prev_log_serial)
1923

    
1924
  def QueryJobs(self, job_ids, fields):
1925
    """Returns the selected fields for the selected job IDs.
1926

1927
    """
1928
    return self.cl.QueryJobs(job_ids, fields)
1929

    
1930

    
1931
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1932
  def __init__(self, feedback_fn):
1933
    """Initializes this class.
1934

1935
    """
1936
    JobPollReportCbBase.__init__(self)
1937

    
1938
    self.feedback_fn = feedback_fn
1939

    
1940
    assert callable(feedback_fn)
1941

    
1942
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1943
    """Handles a log message.
1944

1945
    """
1946
    self.feedback_fn((timestamp, log_type, log_msg))
1947

    
1948
  def ReportNotChanged(self, job_id, status):
1949
    """Called if a job hasn't changed in a while.
1950

1951
    """
1952
    # Ignore
1953

    
1954

    
1955
class StdioJobPollReportCb(JobPollReportCbBase):
1956
  def __init__(self):
1957
    """Initializes this class.
1958

1959
    """
1960
    JobPollReportCbBase.__init__(self)
1961

    
1962
    self.notified_queued = False
1963
    self.notified_waitlock = False
1964

    
1965
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1966
    """Handles a log message.
1967

1968
    """
1969
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1970
             FormatLogMessage(log_type, log_msg))
1971

    
1972
  def ReportNotChanged(self, job_id, status):
1973
    """Called if a job hasn't changed in a while.
1974

1975
    """
1976
    if status is None:
1977
      return
1978

    
1979
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1980
      ToStderr("Job %s is waiting in queue", job_id)
1981
      self.notified_queued = True
1982

    
1983
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1984
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1985
      self.notified_waitlock = True
1986

    
1987

    
1988
def FormatLogMessage(log_type, log_msg):
1989
  """Formats a job message according to its type.
1990

1991
  """
1992
  if log_type != constants.ELOG_MESSAGE:
1993
    log_msg = str(log_msg)
1994

    
1995
  return utils.SafeEncode(log_msg)
1996

    
1997

    
1998
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1999
  """Function to poll for the result of a job.
2000

2001
  @type job_id: job identifier
2002
  @param job_id: the job to poll for results
2003
  @type cl: luxi.Client
2004
  @param cl: the luxi client to use for communicating with the master;
2005
             if None, a new client will be created
2006

2007
  """
2008
  if cl is None:
2009
    cl = GetClient()
2010

    
2011
  if reporter is None:
2012
    if feedback_fn:
2013
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2014
    else:
2015
      reporter = StdioJobPollReportCb()
2016
  elif feedback_fn:
2017
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2018

    
2019
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
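# Usage sketch (illustrative only): submit a job asynchronously with SendJob
# and wait for it later with PollJob, which returns the list of opcode
# results:
#
#   job_id = SendJob([op], cl=cl)
#   ToStdout("Submitted job %s", job_id)
#   results = PollJob(job_id, cl=cl)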


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2023
  """Legacy function to submit an opcode.
2024

2025
  This is just a simple wrapper over the construction of the processor
2026
  instance. It should be extended to better handle feedback and
2027
  interaction functions.
2028

2029
  """
2030
  if cl is None:
2031
    cl = GetClient()
2032

    
2033
  SetGenericOpcodeOpts([op], opts)
2034

    
2035
  job_id = SendJob([op], cl=cl)
2036

    
2037
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2038
                       reporter=reporter)
2039

    
2040
  return op_results[0]
2041

    
2042

    
2043
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2044
  """Wrapper around SubmitOpCode or SendJob.
2045

2046
  This function will decide, based on the 'opts' parameter, whether to
2047
  submit and wait for the result of the opcode (and return it), or
2048
  whether to just send the job and print its identifier. It is used in
2049
  order to simplify the implementation of the '--submit' option.
2050

2051
  It will also process the opcodes if we're sending them via SendJob
2052
  (otherwise SubmitOpCode does it).
2053

2054
  """
2055
  if opts and opts.submit_only:
2056
    job = [op]
2057
    SetGenericOpcodeOpts(job, opts)
2058
    job_id = SendJob(job, cl=cl)
2059
    raise JobSubmittedException(job_id)
2060
  else:
2061
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
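# Typical pattern in a gnt-* command implementation (illustrative only; the
# opcode is just an example):
#
#   op = opcodes.OpInstanceStartup(instance_name=args[0])
#   SubmitOrSend(op, opts)
#
# With --submit this raises JobSubmittedException carrying the job ID (which
# GenericMain/FormatError report as "JobID: <id>" and exit code 0); otherwise
# the call blocks until the job finishes and returns the opcode result.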


def SetGenericOpcodeOpts(opcode_list, options):
2065
  """Processor for generic options.
2066

2067
  This function updates the given opcodes based on generic command
2068
  line options (like debug, dry-run, etc.).
2069

2070
  @param opcode_list: list of opcodes
2071
  @param options: command line options or None
2072
  @return: None (in-place modification)
2073

2074
  """
2075
  if not options:
2076
    return
2077
  for op in opcode_list:
2078
    op.debug_level = options.debug
2079
    if hasattr(options, "dry_run"):
2080
      op.dry_run = options.dry_run
2081
    if getattr(options, "priority", None) is not None:
2082
      op.priority = _PRIONAME_TO_VALUE[options.priority]
2083

    
2084

    
2085
def GetClient():
2086
  # TODO: Cache object?
2087
  try:
2088
    client = luxi.Client()
2089
  except luxi.NoMasterError:
2090
    ss = ssconf.SimpleStore()
2091

    
2092
    # Try to read ssconf file
2093
    try:
2094
      ss.GetMasterNode()
2095
    except errors.ConfigurationError:
2096
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2097
                                 " not part of a cluster",
2098
                                 errors.ECODE_INVAL)
2099

    
2100
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2101
    if master != myself:
2102
      raise errors.OpPrereqError("This is not the master node, please connect"
2103
                                 " to node '%s' and rerun the command" %
2104
                                 master, errors.ECODE_INVAL)
2105
    raise
2106
  return client
2107

    
2108

    
2109
def FormatError(err):
2110
  """Return a formatted error message for a given error.
2111

2112
  This function takes an exception instance and returns a tuple
2113
  consisting of two values: first, the recommended exit code, and
2114
  second, a string describing the error message (not
2115
  newline-terminated).
2116

2117
  """
2118
  retcode = 1
2119
  obuf = StringIO()
2120
  msg = str(err)
2121
  if isinstance(err, errors.ConfigurationError):
2122
    txt = "Corrupt configuration file: %s" % msg
2123
    logging.error(txt)
2124
    obuf.write(txt + "\n")
2125
    obuf.write("Aborting.")
2126
    retcode = 2
2127
  elif isinstance(err, errors.HooksAbort):
2128
    obuf.write("Failure: hooks execution failed:\n")
2129
    for node, script, out in err.args[0]:
2130
      if out:
2131
        obuf.write("  node: %s, script: %s, output: %s\n" %
2132
                   (node, script, out))
2133
      else:
2134
        obuf.write("  node: %s, script: %s (no output)\n" %
2135
                   (node, script))
2136
  elif isinstance(err, errors.HooksFailure):
2137
    obuf.write("Failure: hooks general failure: %s" % msg)
2138
  elif isinstance(err, errors.ResolverError):
2139
    this_host = netutils.Hostname.GetSysName()
2140
    if err.args[0] == this_host:
2141
      msg = "Failure: can't resolve my own hostname ('%s')"
2142
    else:
2143
      msg = "Failure: can't resolve hostname '%s'"
2144
    obuf.write(msg % err.args[0])
2145
  elif isinstance(err, errors.OpPrereqError):
2146
    if len(err.args) == 2:
2147
      obuf.write("Failure: prerequisites not met for this"
2148
                 " operation:\nerror type: %s, error details:\n%s" %
2149
                 (err.args[1], err.args[0]))
2150
    else:
2151
      obuf.write("Failure: prerequisites not met for this"
2152
                 " operation:\n%s" % msg)
2153
  elif isinstance(err, errors.OpExecError):
2154
    obuf.write("Failure: command execution error:\n%s" % msg)
2155
  elif isinstance(err, errors.TagError):
2156
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2157
  elif isinstance(err, errors.JobQueueDrainError):
2158
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2159
               " accept new requests\n")
2160
  elif isinstance(err, errors.JobQueueFull):
2161
    obuf.write("Failure: the job queue is full and doesn't accept new"
2162
               " job submissions until old jobs are archived\n")
2163
  elif isinstance(err, errors.TypeEnforcementError):
2164
    obuf.write("Parameter Error: %s" % msg)
2165
  elif isinstance(err, errors.ParameterError):
2166
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2167
  elif isinstance(err, luxi.NoMasterError):
2168
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2169
               " and listening for connections?")
2170
  elif isinstance(err, luxi.TimeoutError):
2171
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2172
               " been submitted and will continue to run even if the call"
2173
               " timed out. Useful commands in this situation are \"gnt-job"
2174
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2175
    obuf.write(msg)
2176
  elif isinstance(err, luxi.PermissionError):
2177
    obuf.write("It seems you don't have permissions to connect to the"
2178
               " master daemon.\nPlease retry as a different user.")
2179
  elif isinstance(err, luxi.ProtocolError):
2180
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2181
               "%s" % msg)
2182
  elif isinstance(err, errors.JobLost):
2183
    obuf.write("Error checking job status: %s" % msg)
2184
  elif isinstance(err, errors.QueryFilterParseError):
2185
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2186
    obuf.write("\n".join(err.GetDetails()))
2187
  elif isinstance(err, errors.GenericError):
2188
    obuf.write("Unhandled Ganeti error: %s" % msg)
2189
  elif isinstance(err, JobSubmittedException):
2190
    obuf.write("JobID: %s\n" % err.args[0])
2191
    retcode = 0
2192
  else:
2193
    obuf.write("Unhandled exception: %s" % msg)
2194
  return retcode, obuf.getvalue().rstrip("\n")
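# Usage sketch (illustrative only): callers typically turn an exception into
# an exit code plus a message for the user:
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     retcode, message = FormatError(err)
#     ToStderr(message)
#     sys.exit(retcode)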


def GenericMain(commands, override=None, aliases=None,
2198
                env_override=frozenset()):
2199
  """Generic main function for all the gnt-* commands.
2200

2201
  @param commands: a dictionary with a special structure, see the design doc
2202
                   for command line handling.
2203
  @param override: if not None, we expect a dictionary with keys that will
2204
                   override command line options; this can be used to pass
2205
                   options from the scripts to generic functions
2206
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2207
  @param env_override: list of environment names which are allowed to submit
2208
                       default args for commands
2209

2210
  """
2211
  # save the program name and the entire command line for later logging
2212
  if sys.argv:
2213
    binary = os.path.basename(sys.argv[0])
2214
    if not binary:
2215
      binary = sys.argv[0]
2216

    
2217
    if len(sys.argv) >= 2:
2218
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2219
    else:
2220
      logname = binary
2221

    
2222
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2223
  else:
2224
    binary = "<unknown program>"
2225
    cmdline = "<unknown>"
2226

    
2227
  if aliases is None:
2228
    aliases = {}
2229

    
2230
  try:
2231
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2232
  except errors.ParameterError, err:
2233
    result, err_msg = FormatError(err)
2234
    ToStderr(err_msg)
2235
    return 1
2236

    
2237
  if func is None: # parse error
2238
    return 1
2239

    
2240
  if override is not None:
2241
    for key, val in override.iteritems():
2242
      setattr(options, key, val)
2243

    
2244
  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
2245
                     stderr_logging=True)
2246

    
2247
  logging.info("Command line: %s", cmdline)
2248

    
2249
  try:
2250
    result = func(options, args)
2251
  except (errors.GenericError, luxi.ProtocolError,
2252
          JobSubmittedException), err:
2253
    result, err_msg = FormatError(err)
2254
    logging.exception("Error during command processing")
2255
    ToStderr(err_msg)
2256
  except KeyboardInterrupt:
2257
    result = constants.EXIT_FAILURE
2258
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2259
             " might have been submitted and"
2260
             " will continue to run in the background.")
2261
  except IOError, err:
2262
    if err.errno == errno.EPIPE:
2263
      # our terminal went away, we'll exit
2264
      sys.exit(constants.EXIT_FAILURE)
2265
    else:
2266
      raise
2267

    
2268
  return result
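# Sketch of the expected "commands" structure (illustrative only; ListFoo and
# the empty argument/option lists are hypothetical placeholders). Each value
# is the 5-tuple unpacked in _ParseArgs: (function, arguments definition,
# option list, usage string, description):
#
#   commands = {
#     "list": (ListFoo, [], [], "", "Lists the foo objects"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))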


def ParseNicOption(optvalue):
2272
  """Parses the value of the --net option(s).
2273

2274
  """
2275
  try:
2276
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2277
  except (TypeError, ValueError), err:
2278
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2279
                               errors.ECODE_INVAL)
2280

    
2281
  nics = [{}] * nic_max
2282
  for nidx, ndict in optvalue:
2283
    nidx = int(nidx)
2284

    
2285
    if not isinstance(ndict, dict):
2286
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2287
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2288

    
2289
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2290

    
2291
    nics[nidx] = ndict
2292

    
2293
  return nics
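# Example (illustrative only; assumes "ip" and "link" are accepted NIC
# parameters): indices may be sparse, the missing ones are filled with empty
# dicts:
#
#   ParseNicOption([("0", {"ip": "192.0.2.10"}), ("2", {"link": "br0"})])
#     # -> [{"ip": "192.0.2.10"}, {}, {"link": "br0"}]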


def GenericInstanceCreate(mode, opts, args):
2297
  """Add an instance to the cluster via either creation or import.
2298

2299
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2300
  @param opts: the command line options selected by the user
2301
  @type args: list
2302
  @param args: should contain only one element, the new instance name
2303
  @rtype: int
2304
  @return: the desired exit code
2305

2306
  """
2307
  instance = args[0]
2308

    
2309
  (pnode, snode) = SplitNodeOption(opts.node)
2310

    
2311
  hypervisor = None
2312
  hvparams = {}
2313
  if opts.hypervisor:
2314
    hypervisor, hvparams = opts.hypervisor
2315

    
2316
  if opts.nics:
2317
    nics = ParseNicOption(opts.nics)
2318
  elif opts.no_nics:
2319
    # no nics
2320
    nics = []
2321
  elif mode == constants.INSTANCE_CREATE:
2322
    # default of one nic, all auto
2323
    nics = [{}]
2324
  else:
2325
    # mode == import
2326
    nics = []
2327

    
2328
  if opts.disk_template == constants.DT_DISKLESS:
2329
    if opts.disks or opts.sd_size is not None:
2330
      raise errors.OpPrereqError("Diskless instance but disk"
2331
                                 " information passed", errors.ECODE_INVAL)
2332
    disks = []
2333
  else:
2334
    if (not opts.disks and not opts.sd_size
2335
        and mode == constants.INSTANCE_CREATE):
2336
      raise errors.OpPrereqError("No disk information specified",
2337
                                 errors.ECODE_INVAL)
2338
    if opts.disks and opts.sd_size is not None:
2339
      raise errors.OpPrereqError("Please use either the '--disk' or"
2340
                                 " '-s' option", errors.ECODE_INVAL)
2341
    if opts.sd_size is not None:
2342
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2343

    
2344
    if opts.disks:
2345
      try:
2346
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2347
      except ValueError, err:
2348
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2349
                                   errors.ECODE_INVAL)
2350
      disks = [{}] * disk_max
2351
    else:
2352
      disks = []
2353
    for didx, ddict in opts.disks:
2354
      didx = int(didx)
2355
      if not isinstance(ddict, dict):
2356
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2357
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2358
      elif constants.IDISK_SIZE in ddict:
2359
        if constants.IDISK_ADOPT in ddict:
2360
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2361
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2362
        try:
2363
          ddict[constants.IDISK_SIZE] = \
2364
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2365
        except ValueError, err:
2366
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2367
                                     (didx, err), errors.ECODE_INVAL)
2368
      elif constants.IDISK_ADOPT in ddict:
2369
        if mode == constants.INSTANCE_IMPORT:
2370
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2371
                                     " import", errors.ECODE_INVAL)
2372
        ddict[constants.IDISK_SIZE] = 0
2373
      else:
2374
        raise errors.OpPrereqError("Missing size or adoption source for"
2375
                                   " disk %d" % didx, errors.ECODE_INVAL)
2376
      disks[didx] = ddict
2377

    
2378
  if opts.tags is not None:
2379
    tags = opts.tags.split(",")
2380
  else:
2381
    tags = []
2382

    
2383
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2384
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2385

    
2386
  if mode == constants.INSTANCE_CREATE:
2387
    start = opts.start
2388
    os_type = opts.os
2389
    force_variant = opts.force_variant
2390
    src_node = None
2391
    src_path = None
2392
    no_install = opts.no_install
2393
    identify_defaults = False
2394
  elif mode == constants.INSTANCE_IMPORT:
2395
    start = False
2396
    os_type = None
2397
    force_variant = False
2398
    src_node = opts.src_node
2399
    src_path = opts.src_dir
2400
    no_install = None
2401
    identify_defaults = opts.identify_defaults
2402
  else:
2403
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2404

    
2405
  op = opcodes.OpInstanceCreate(instance_name=instance,
2406
                                disks=disks,
2407
                                disk_template=opts.disk_template,
2408
                                nics=nics,
2409
                                pnode=pnode, snode=snode,
2410
                                ip_check=opts.ip_check,
2411
                                name_check=opts.name_check,
2412
                                wait_for_sync=opts.wait_for_sync,
2413
                                file_storage_dir=opts.file_storage_dir,
2414
                                file_driver=opts.file_driver,
2415
                                iallocator=opts.iallocator,
2416
                                hypervisor=hypervisor,
2417
                                hvparams=hvparams,
2418
                                beparams=opts.beparams,
2419
                                osparams=opts.osparams,
2420
                                mode=mode,
2421
                                start=start,
2422
                                os_type=os_type,
2423
                                force_variant=force_variant,
2424
                                src_node=src_node,
2425
                                src_path=src_path,
2426
                                tags=tags,
2427
                                no_install=no_install,
2428
                                identify_defaults=identify_defaults,
2429
                                ignore_ipolicy=opts.ignore_ipolicy)
2430

    
2431
  SubmitOrSend(op, opts)
2432
  return 0
2433

    
2434

    
2435
class _RunWhileClusterStoppedHelper:
2436
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2437

2438
  """
2439
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2440
    """Initializes this class.
2441

2442
    @type feedback_fn: callable
2443
    @param feedback_fn: Feedback function
2444
    @type cluster_name: string
2445
    @param cluster_name: Cluster name
2446
    @type master_node: string
2447
    @param master_node: Master node name
2448
    @type online_nodes: list
2449
    @param online_nodes: List of names of online nodes
2450

2451
    """
2452
    self.feedback_fn = feedback_fn
2453
    self.cluster_name = cluster_name
2454
    self.master_node = master_node
2455
    self.online_nodes = online_nodes
2456

    
2457
    self.ssh = ssh.SshRunner(self.cluster_name)
2458

    
2459
    self.nonmaster_nodes = [name for name in online_nodes
2460
                            if name != master_node]
2461

    
2462
    assert self.master_node not in self.nonmaster_nodes
2463

    
2464
  def _RunCmd(self, node_name, cmd):
2465
    """Runs a command on the local or a remote machine.
2466

2467
    @type node_name: string
2468
    @param node_name: Machine name
2469
    @type cmd: list
2470
    @param cmd: Command
2471

2472
    """
2473
    if node_name is None or node_name == self.master_node:
2474
      # No need to use SSH
2475
      result = utils.RunCmd(cmd)
2476
    else:
2477
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2478

    
2479
    if result.failed:
2480
      errmsg = ["Failed to run command %s" % result.cmd]
2481
      if node_name:
2482
        errmsg.append("on node %s" % node_name)
2483
      errmsg.append(": exitcode %s and error %s" %
2484
                    (result.exit_code, result.output))
2485
      raise errors.OpExecError(" ".join(errmsg))
2486

    
2487
  def Call(self, fn, *args):
2488
    """Call function while all daemons are stopped.
2489

2490
    @type fn: callable
2491
    @param fn: Function to be called
2492

2493
    """
2494
    # Pause watcher by acquiring an exclusive lock on watcher state file
2495
    self.feedback_fn("Blocking watcher")
2496
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2497
    try:
2498
      # TODO: Currently, this just blocks. There's no timeout.
2499
      # TODO: Should it be a shared lock?
2500
      watcher_block.Exclusive(blocking=True)
2501

    
2502
      # Stop master daemons, so that no new jobs can come in and all running
2503
      # ones are finished
2504
      self.feedback_fn("Stopping master daemons")
2505
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2506
      try:
2507
        # Stop daemons on all nodes
2508
        for node_name in self.online_nodes:
2509
          self.feedback_fn("Stopping daemons on %s" % node_name)
2510
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2511

    
2512
        # All daemons are shut down now
2513
        try:
2514
          return fn(self, *args)
2515
        except Exception, err:
2516
          _, errmsg = FormatError(err)
2517
          logging.exception("Caught exception")
2518
          self.feedback_fn(errmsg)
2519
          raise
2520
      finally:
2521
        # Start cluster again, master node last
2522
        for node_name in self.nonmaster_nodes + [self.master_node]:
2523
          self.feedback_fn("Starting daemons on %s" % node_name)
2524
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2525
    finally:
2526
      # Resume watcher
2527
      watcher_block.Close()
2528

    
2529

    
2530
def RunWhileClusterStopped(feedback_fn, fn, *args):
2531
  """Calls a function while all cluster daemons are stopped.
2532

2533
  @type feedback_fn: callable
2534
  @param feedback_fn: Feedback function
2535
  @type fn: callable
2536
  @param fn: Function to be called when daemons are stopped
2537

2538
  """
2539
  feedback_fn("Gathering cluster information")
2540

    
2541
  # This ensures we're running on the master daemon
2542
  cl = GetClient()
2543

    
2544
  (cluster_name, master_node) = \
2545
    cl.QueryConfigValues(["cluster_name", "master_node"])
2546

    
2547
  online_nodes = GetOnlineNodes([], cl=cl)
2548

    
2549
  # Don't keep a reference to the client. The master daemon will go away.
2550
  del cl
2551

    
2552
  assert master_node in online_nodes
2553

    
2554
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2555
                                       online_nodes).Call(fn, *args)
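# Usage sketch (illustrative only; _DoSomething is hypothetical). Note that
# the called function receives the helper instance as its first argument (see
# _RunWhileClusterStoppedHelper.Call above):
#
#   def _DoSomething(helper, new_value):
#     helper.feedback_fn("Working while all daemons are down")
#     return new_value
#
#   RunWhileClusterStopped(ToStdout, _DoSomething, 42)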


def GenerateTable(headers, fields, separator, data,
2559
                  numfields=None, unitfields=None,
2560
                  units=None):
2561
  """Prints a table with headers and different fields.
2562

2563
  @type headers: dict
2564
  @param headers: dictionary mapping field names to headers for
2565
      the table
2566
  @type fields: list
2567
  @param fields: the field names corresponding to each row in
2568
      the data field
2569
  @param separator: the separator to be used; if this is None,
2570
      the default 'smart' algorithm is used which computes optimal
2571
      field width, otherwise just the separator is used between
2572
      each field
2573
  @type data: list
2574
  @param data: a list of lists, each sublist being one row to be output
2575
  @type numfields: list
2576
  @param numfields: a list with the fields that hold numeric
2577
      values and thus should be right-aligned
2578
  @type unitfields: list
2579
  @param unitfields: a list with the fields that hold numeric
2580
      values that should be formatted with the units field
2581
  @type units: string or None
2582
  @param units: the units we should use for formatting, or None for
2583
      automatic choice (human-readable for non-separator usage, otherwise
2584
      megabytes); this is a one-letter string
2585

2586
  """
2587
  if units is None:
2588
    if separator:
2589
      units = "m"
2590
    else:
2591
      units = "h"
2592

    
2593
  if numfields is None:
2594
    numfields = []
2595
  if unitfields is None:
2596
    unitfields = []
2597

    
2598
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2599
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2600

    
2601
  format_fields = []
2602
  for field in fields:
2603
    if headers and field not in headers:
2604
      # TODO: handle better unknown fields (either revert to old
2605
      # style of raising exception, or deal more intelligently with
2606
      # variable fields)
2607
      headers[field] = field
2608
    if separator is not None:
2609
      format_fields.append("%s")
2610
    elif numfields.Matches(field):
2611
      format_fields.append("%*s")
2612
    else:
2613
      format_fields.append("%-*s")
2614

    
2615
  if separator is None:
2616
    mlens = [0 for name in fields]
2617
    format_str = " ".join(format_fields)
2618
  else:
2619
    format_str = separator.replace("%", "%%").join(format_fields)
2620

    
2621
  for row in data:
2622
    if row is None:
2623
      continue
2624
    for idx, val in enumerate(row):
2625
      if unitfields.Matches(fields[idx]):
2626
        try:
2627
          val = int(val)
2628
        except (TypeError, ValueError):
2629
          pass
2630
        else:
2631
          val = row[idx] = utils.FormatUnit(val, units)
2632
      val = row[idx] = str(val)
2633
      if separator is None:
2634
        mlens[idx] = max(mlens[idx], len(val))
2635

    
2636
  result = []
2637
  if headers:
2638
    args = []
2639
    for idx, name in enumerate(fields):
2640
      hdr = headers[name]
2641
      if separator is None:
2642
        mlens[idx] = max(mlens[idx], len(hdr))
2643
        args.append(mlens[idx])
2644
      args.append(hdr)
2645
    result.append(format_str % tuple(args))
2646

    
2647
  if separator is None:
2648
    assert len(mlens) == len(fields)
2649

    
2650
    if fields and not numfields.Matches(fields[-1]):
2651
      mlens[-1] = 0
2652

    
2653
  for line in data:
2654
    args = []
2655
    if line is None:
2656
      line = ["-" for _ in fields]
2657
    for idx in range(len(fields)):
2658
      if separator is None:
2659
        args.append(mlens[idx])
2660
      args.append(line[idx])
2661
    result.append(format_str % tuple(args))
2662

    
2663
  return result
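# Example (illustrative only; field names, headers and values are made up):
#
#   headers = {"name": "Node", "dtotal": "DTotal"}
#   fields = ["name", "dtotal"]
#   data = [["node1.example.com", 102400], ["node2.example.com", 51200]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["dtotal"], unitfields=["dtotal"]):
#     ToStdout(line)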


def _FormatBool(value):
2667
  """Formats a boolean value as a string.
2668

2669
  """
2670
  if value:
2671
    return "Y"
2672
  return "N"
2673

    
2674

    
2675
#: Default formatting for query results; (callback, align right)
2676
_DEFAULT_FORMAT_QUERY = {
2677
  constants.QFT_TEXT: (str, False),
2678
  constants.QFT_BOOL: (_FormatBool, False),
2679
  constants.QFT_NUMBER: (str, True),
2680
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2681
  constants.QFT_OTHER: (str, False),
2682
  constants.QFT_UNKNOWN: (str, False),
2683
  }
2684

    
2685

    
2686
def _GetColumnFormatter(fdef, override, unit):
2687
  """Returns formatting function for a field.
2688

2689
  @type fdef: L{objects.QueryFieldDefinition}
2690
  @type override: dict
2691
  @param override: Dictionary for overriding field formatting functions,
2692
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2693
  @type unit: string
2694
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2695
  @rtype: tuple; (callable, bool)
2696
  @return: Returns the function to format a value (takes one parameter) and a
2697
    boolean for aligning the value on the right-hand side
2698

2699
  """
2700
  fmt = override.get(fdef.name, None)
2701
  if fmt is not None:
2702
    return fmt
2703

    
2704
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2705

    
2706
  if fdef.kind == constants.QFT_UNIT:
2707
    # Can't keep this information in the static dictionary
2708
    return (lambda value: utils.FormatUnit(value, unit), True)
2709

    
2710
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2711
  if fmt is not None:
2712
    return fmt
2713

    
2714
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2715

    
2716

    
2717
class _QueryColumnFormatter:
2718
  """Callable class for formatting fields of a query.
2719

2720
  """
2721
  def __init__(self, fn, status_fn, verbose):
2722
    """Initializes this class.
2723

2724
    @type fn: callable
2725
    @param fn: Formatting function
2726
    @type status_fn: callable
2727
    @param status_fn: Function to report fields' status
2728
    @type verbose: boolean
2729
    @param verbose: whether to use verbose field descriptions or not
2730

2731
    """
2732
    self._fn = fn
2733
    self._status_fn = status_fn
2734
    self._verbose = verbose
2735

    
2736
  def __call__(self, data):
2737
    """Returns a field's string representation.
2738

2739
    """
2740
    (status, value) = data
2741

    
2742
    # Report status
2743
    self._status_fn(status)
2744

    
2745
    if status == constants.RS_NORMAL:
2746
      return self._fn(value)
2747

    
2748
    assert value is None, \
2749
           "Found value %r for abnormal status %s" % (value, status)
2750

    
2751
    return FormatResultError(status, self._verbose)
2752

    
2753

    
2754
def FormatResultError(status, verbose):
2755
  """Formats result status other than L{constants.RS_NORMAL}.
2756

2757
  @param status: The result status
2758
  @type verbose: boolean
2759
  @param verbose: Whether to return the verbose text
2760
  @return: Text of result status
2761

2762
  """
2763
  assert status != constants.RS_NORMAL, \
2764
         "FormatResultError called with status equal to constants.RS_NORMAL"
2765
  try:
2766
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2767
  except KeyError:
2768
    raise NotImplementedError("Unknown status %s" % status)
2769
  else:
2770
    if verbose:
2771
      return verbose_text
2772
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
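
# Illustrative sketch (not part of the original module): a command could
# render a query response roughly like this, assuming "cl" is a LUXI client
# obtained via GetClient(); the fields used are only an example:
#
#   response = cl.Query(constants.QR_NODE, ["name", "dtotal"], None)
#   (status, lines) = FormatQueryResult(response, unit="h", header=True)
#   for line in lines:
#     ToStdout(line)
#   # "status" is one of QR_NORMAL, QR_INCOMPLETE or QR_UNKNOWN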


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
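
# Illustrative sketch (not part of the original module): a "gnt-* list" style
# command can delegate to GenericList; the resource, fields and option names
# below are only an example:
#
#   return GenericList(constants.QR_NODE,
#                      ["name", "pinst_cnt", "sinst_cnt"],
#                      args, opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose)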


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
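
# Illustrative sketch (not part of the original module): building a small
# two-column table by hand:
#
#   cols = [TableColumn("Name", str, False),
#           TableColumn("Size", str, True)]
#   rows = [["disk0", "128"], ["disk1", "1024"]]
#   for line in FormatTable(rows, cols, True, None):
#     ToStdout(line)
#
# which prints a header row followed by the two data rows, with the "Size"
# column right-aligned.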


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
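
# For example, FormatTimestamp((1234567890, 0)) delegates to
# utils.FormatTime() and returns a human-readable date/time string, while
# malformed input such as FormatTimestamp(None) simply yields "?".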


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
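
# For example, ParseTimespec("30m") returns 1800 and ParseTimespec("2d")
# returns 172800, while ParseTimespec("90") is taken as plain seconds and
# returns 90; values such as "d" or "2x" raise OpPrereqError.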


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
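
# Illustrative sketch (not part of the original module): fetching the
# secondary IPs of all online non-master nodes, e.g. for copying files over
# the replication network:
#
#   sips = GetOnlineNodes([], secondary_ips=True, filter_master=True)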


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
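
# For example, ToStdout("Instance %s has %d disks", "inst1", 2) writes the
# interpolated message plus a newline to stdout, and ToStderr() does the
# same on stderr; extra arguments are applied with the "%" operator.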


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used while waiting for
        and reporting on the job's result

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
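
# Illustrative sketch (not part of the original module): submitting several
# jobs and waiting for all of them, as a client command might do (the opcode
# and the "opts" object used here are only examples):
#
#   jex = JobExecutor(opts=opts)
#   for instance_name in instance_names:
#     jex.QueueJob(instance_name,
#                  opcodes.OpInstanceStartup(instance_name=instance_name))
#   results = jex.GetResults()
#   # "results" is a list of (success, job result/error message) tuples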


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters that were set explicitly (as opposed to
      inherited defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: additional text inserted before the confirmation question
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns a dict of potential values with units.

  Values equal to L{constants.VALUE_DEFAULT} are kept as-is; everything else
  is passed through L{utils.ParseUnit}.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
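
# For example, _MaybeParseUnit({"min": "1G", "max": "default"}) would return
# {"min": 1024, "max": "default"}, since utils.ParseUnit() converts size
# strings to mebibytes and the special value "default" is passed through.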


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ipolicy_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES

  for specs in ipolicy_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ipolicy_out = objects.MakeEmptyIPolicy()
  for name, specs in ipolicy_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      ipolicy_out[key][name] = val

  # no filldict for non-dicts
  if not group_ipolicy and fill_all:
    if ipolicy_disk_templates is None:
      ipolicy_disk_templates = constants.DISK_TEMPLATES
    if ipolicy_vcpu_ratio is None:
      ipolicy_vcpu_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
    if ipolicy_spindle_ratio is None:
      ipolicy_spindle_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  return ipolicy_out
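
# Illustrative sketch (not part of the original module): building a
# cluster-wide policy from explicit specs, letting fill_all pick up the
# defaults for the remaining ratios (all values below are examples only):
#
#   ipolicy = CreateIPolicyFromOpts(
#     ispecs_mem_size={"min": "128M", "max": "16G", "std": "1G"},
#     ispecs_cpu_count={"min": 1, "max": 8, "std": 1},
#     ispecs_disk_count={"min": 1, "max": 16, "std": 1},
#     ispecs_disk_size={"min": "1G", "max": "1T", "std": "10G"},
#     ispecs_nic_count={"min": 1, "max": 8, "std": 1},
#     fill_all=True)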