#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
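
# Usage sketch: each ARGS_* list above is an argument specification for a CLI
# command; the min/max bounds on the individual Arg* objects limit how many
# positional arguments are accepted.  For example, ARGS_ONE_INSTANCE
# ([ArgInstance(min=1, max=1)]) requires exactly one instance name, while
# ARGS_MANY_NODES ([ArgNode()]) accepts any number of node names, including
# none.  A hypothetical command-table entry (names invented purely for
# illustration) would tie a spec and options to a handler roughly like:
#
#   # "list-tags": (ListTags, ARGS_ONE_INSTANCE, [TAG_SRC_OPT], "...", "...")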


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
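
# Behaviour sketch (values hypothetical): with opts.tag_type set to
# constants.TAG_NODE and args == ["node1.example.com"], the function returns
# (constants.TAG_NODE, "node1.example.com") and pops the name from args; for
# constants.TAG_CLUSTER no name is consumed and (kind, kind) is returned.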


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
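
# Behaviour sketch (file name hypothetical): if opts.tags_source is
# "/tmp/tags.txt", each line of that file is stripped and appended to args;
# passing "-" reads the tag list from standard input instead.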


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
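
# Usage sketch (values approximate, relying on utils.ParseUnit semantics):
#
#   check_unit(None, "--os-size", "512")  ->  512    (plain numbers are MiB)
#   check_unit(None, "--os-size", "4g")   ->  4096   (suffixes scale the value)
#
# A malformed value such as "10x" is reported as an OptionValueError naming
# the offending option instead of escaping as a raw UnitParseError.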


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
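
# Usage sketch (option name and keys hypothetical):
#
#   _SplitKeyVal("-B", "memory=512,auto_balance,no_failover")
#   ->  {"memory": "512", "auto_balance": True, "failover": False}
#
# A bare key prefixed with "-" (e.g. "-memory") maps to None, which callers
# typically interpret as "reset this parameter to its default".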


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
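
# Usage sketch (values hypothetical): declared with action="append", an
# option of type "identkeyval" collects one tuple per use, e.g.
#
#   check_ident_key_val(None, "--net", "0:ip=192.0.2.10,link=br0")
#   ->  ("0", {"ip": "192.0.2.10", "link": "br0"})
#
# while "no_<ident>" yields ("<ident>", False) and "-<ident>" (when the
# identifier does not start with a digit) yields ("<ident>", None).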


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggest is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
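
# Usage sketch (the option below is hypothetical, not one defined by this
# module): options using the custom types registered in CliOption are
# declared like any other optparse option, e.g.
#
#   EXAMPLE_PARAMS_OPT = cli_option("--example-parameters", dest="example",
#                                   type="keyval", default={},
#                                   help="Example key=value parameters")
#
# The TYPE_CHECKER entry for "keyval" (check_key_val) then converts the raw
# command-line string into a dictionary before it reaches the command code.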


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                 dest="ipolicy_vcpu_ratio",
                                 type="maybefloat", default=None,
                                 help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the MAC prefix for the instance"
                            " NIC addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
1396
                                     " (applies only to internally mirrored"
1397
                                     " disk templates, e.g. %s)" %
1398
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1399

    
1400
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1401
                                action="store_true", default=False,
1402
                                help="Pause instance at startup")
1403

    
1404
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1405
                          help="Destination node group (name or uuid)",
1406
                          default=None, action="append",
1407
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1408

    
1409
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1410
                               action="append", dest="ignore_errors",
1411
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1412
                               help="Error code to be ignored")
1413

    
1414
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1415
                            action="append",
1416
                            help=("Specify disk state information in the"
1417
                                  " format"
1418
                                  " storage_type/identifier:option=value,...;"
1419
                                  " note this is unused for now"),
1420
                            type="identkeyval")
1421

    
1422
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1423
                          action="append",
1424
                          help=("Specify hypervisor state information in the"
1425
                                " format hypervisor:option=value,...;"
1426
                                " note this is unused for now"),
1427
                          type="identkeyval")
1428

    
1429
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1430
                                action="store_true", default=False,
1431
                                help="Ignore instance policy violations")
1432

    
1433
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1434
                             help="Sets the instance's runtime memory,"
1435
                             " ballooning it up or down to the new value",
1436
                             default=None, type="unit", metavar="<size>")
1437

    
1438
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1439
                          action="store_true", default=False,
1440
                          help="Marks the grow as absolute instead of the"
1441
                          " (default) relative mode")
1442

    
1443
#: Options provided by all commands
1444
COMMON_OPTS = [DEBUG_OPT]
1445

    
1446
# common options for creating instances. add and import then add their own
1447
# specific ones.
1448
COMMON_CREATE_OPTS = [
1449
  BACKEND_OPT,
1450
  DISK_OPT,
1451
  DISK_TEMPLATE_OPT,
1452
  FILESTORE_DIR_OPT,
1453
  FILESTORE_DRIVER_OPT,
1454
  HYPERVISOR_OPT,
1455
  IALLOCATOR_OPT,
1456
  NET_OPT,
1457
  NODE_PLACEMENT_OPT,
1458
  NOIPCHECK_OPT,
1459
  NONAMECHECK_OPT,
1460
  NONICS_OPT,
1461
  NWSYNC_OPT,
1462
  OSPARAMS_OPT,
1463
  OS_SIZE_OPT,
1464
  SUBMIT_OPT,
1465
  TAG_ADD_OPT,
1466
  DRY_RUN_OPT,
1467
  PRIORITY_OPT,
1468
  ]
1469

    
1470
# common instance policy options
1471
INSTANCE_POLICY_OPTS = [
1472
  SPECS_CPU_COUNT_OPT,
1473
  SPECS_DISK_COUNT_OPT,
1474
  SPECS_DISK_SIZE_OPT,
1475
  SPECS_MEM_SIZE_OPT,
1476
  SPECS_NIC_COUNT_OPT,
1477
  IPOLICY_DISK_TEMPLATES,
1478
  IPOLICY_VCPU_RATIO,
1479
  IPOLICY_SPINDLE_RATIO,
1480
  ]
1481

    
1482

    
1483
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of env variables allowed for default args

  """
  assert not (env_override - set(commands))

  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args

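

# Illustrative sketch, not part of the original module: the shape of the
# "commands" dictionary consumed by _ParseArgs above. The argument
# definition list is left empty here (real command tables fill it with this
# module's argument-definition classes); "_DemoFn" and the "demo" command
# are made up.
def _ExampleParseArgs(argv):
  def _DemoFn(opts, args):
    # opts.debug is always available because COMMON_OPTS adds DEBUG_OPT
    ToStdout("debug level: %s, arguments: %s", opts.debug, args)
    return 0

  commands = {
    "demo": (_DemoFn, [], [DRY_RUN_OPT, PRIORITY_OPT],
             "[options...]", "Run the demo command"),
    }
  aliases = {"d": "demo"}
  return _ParseArgs(argv, commands, aliases, frozenset())
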
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True

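

# Illustrative sketch, not part of the original module: how the min/max
# accounting in _CheckArguments plays out. _StubArg merely stands in for the
# module's real argument-definition classes, which expose the same min/max
# attributes.
class _StubArg(object):
  def __init__(self, minval, maxval):
    self.min = minval
    self.max = maxval


def _ExampleCheckArguments():
  # one mandatory value followed by any number of optional ones
  args_def = [_StubArg(1, 1), _StubArg(0, None)]
  assert _CheckArguments("demo", args_def, ["inst1"])
  assert _CheckArguments("demo", args_def, ["inst1", "inst2", "inst3"])
  assert not _CheckArguments("demo", args_def, [])  # too few values
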
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


UsesRPC = rpc.RunWithRPC

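

# Illustrative sketch, not part of the original module: the return values of
# the small helpers above for typical inputs; the node and OS names are
# made up.
def _ExampleFieldHelpers():
  assert SplitNodeOption("node1:node2") == ["node1", "node2"]
  assert SplitNodeOption("node1") == ("node1", None)
  assert CalculateOSNames("debootstrap", ["wheezy", "jessie"]) == \
    ["debootstrap+wheezy", "debootstrap+jessie"]
  assert ParseFields(None, ["name", "status"]) == ["name", "status"]
  assert ParseFields("+oper_ram", ["name"]) == ["name", "oper_ram"]
  assert ParseFields("name,os", ["name"]) == ["name", "os"]
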
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer

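

# Illustrative sketch, not part of the original module: asking a three-way
# question; without a tty, AskUser falls back to the last choice ("cancel").
# The instance name is made up.
def _ExampleAskUser():
  choices = [("y", True, "Proceed with the operation"),
             ("n", False, "Skip this item"),
             ("c", "cancel", "Cancel the whole operation")]
  answer = AskUser("Replace disks for instance inst1?", choices)
  if answer == "cancel":
    ToStderr("Operation cancelled by user")
  return answer
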
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id

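

# Illustrative sketch, not part of the original module: the usual
# submit-then-poll flow built from SendJob above and PollJob (defined
# further below), for an already-constructed opcode.
def _ExampleSubmitAndPoll(op):
  cl = GetClient()
  job_id = SendJob([op], cl=cl)   # submit without waiting
  return PollJob(job_id, cl=cl)   # wait for and return the opcode results
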
def GenericPollJob(job_id, cbs, report_cbs):
1777
  """Generic job-polling function.
1778

1779
  @type job_id: number
1780
  @param job_id: Job ID
1781
  @type cbs: Instance of L{JobPollCbBase}
1782
  @param cbs: Data callbacks
1783
  @type report_cbs: Instance of L{JobPollReportCbBase}
1784
  @param report_cbs: Reporting callbacks
1785

1786
  """
1787
  prev_job_info = None
1788
  prev_logmsg_serial = None
1789

    
1790
  status = None
1791

    
1792
  while True:
1793
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1794
                                      prev_logmsg_serial)
1795
    if not result:
1796
      # job not found, go away!
1797
      raise errors.JobLost("Job with id %s lost" % job_id)
1798

    
1799
    if result == constants.JOB_NOTCHANGED:
1800
      report_cbs.ReportNotChanged(job_id, status)
1801

    
1802
      # Wait again
1803
      continue
1804

    
1805
    # Split result, a tuple of (field values, log entries)
1806
    (job_info, log_entries) = result
1807
    (status, ) = job_info
1808

    
1809
    if log_entries:
1810
      for log_entry in log_entries:
1811
        (serial, timestamp, log_type, message) = log_entry
1812
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1813
                                    log_type, message)
1814
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1815

    
1816
    # TODO: Handle canceled and archived jobs
1817
    elif status in (constants.JOB_STATUS_SUCCESS,
1818
                    constants.JOB_STATUS_ERROR,
1819
                    constants.JOB_STATUS_CANCELING,
1820
                    constants.JOB_STATUS_CANCELED):
1821
      break
1822

    
1823
    prev_job_info = job_info
1824

    
1825
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1826
  if not jobs:
1827
    raise errors.JobLost("Job with id %s lost" % job_id)
1828

    
1829
  status, opstatus, result = jobs[0]
1830

    
1831
  if status == constants.JOB_STATUS_SUCCESS:
1832
    return result
1833

    
1834
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1835
    raise errors.OpExecError("Job was canceled")
1836

    
1837
  has_ok = False
1838
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1839
    if status == constants.OP_STATUS_SUCCESS:
1840
      has_ok = True
1841
    elif status == constants.OP_STATUS_ERROR:
1842
      errors.MaybeRaise(msg)
1843

    
1844
      if has_ok:
1845
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1846
                                 (idx, msg))
1847

    
1848
      raise errors.OpExecError(str(msg))
1849

    
1850
  # default failure mode
1851
  raise errors.OpExecError(result)
1852

    
1853

    
1854
class JobPollCbBase:
1855
  """Base class for L{GenericPollJob} callbacks.
1856

1857
  """
1858
  def __init__(self):
1859
    """Initializes this class.
1860

1861
    """
1862

    
1863
  def WaitForJobChangeOnce(self, job_id, fields,
1864
                           prev_job_info, prev_log_serial):
1865
    """Waits for changes on a job.
1866

1867
    """
1868
    raise NotImplementedError()
1869

    
1870
  def QueryJobs(self, job_ids, fields):
1871
    """Returns the selected fields for the selected job IDs.
1872

1873
    @type job_ids: list of numbers
1874
    @param job_ids: Job IDs
1875
    @type fields: list of strings
1876
    @param fields: Fields
1877

1878
    """
1879
    raise NotImplementedError()
1880

    
1881

    
1882
class JobPollReportCbBase:
1883
  """Base class for L{GenericPollJob} reporting callbacks.
1884

1885
  """
1886
  def __init__(self):
1887
    """Initializes this class.
1888

1889
    """
1890

    
1891
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1892
    """Handles a log message.
1893

1894
    """
1895
    raise NotImplementedError()
1896

    
1897
  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.
1899

1900
    @type job_id: number
1901
    @param job_id: Job ID
1902
    @type status: string or None
1903
    @param status: Job status if available
1904

1905
    """
1906
    raise NotImplementedError()
1907

    
1908

    
1909
class _LuxiJobPollCb(JobPollCbBase):
1910
  def __init__(self, cl):
1911
    """Initializes this class.
1912

1913
    """
1914
    JobPollCbBase.__init__(self)
1915
    self.cl = cl
1916

    
1917
  def WaitForJobChangeOnce(self, job_id, fields,
1918
                           prev_job_info, prev_log_serial):
1919
    """Waits for changes on a job.
1920

1921
    """
1922
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1923
                                        prev_job_info, prev_log_serial)
1924

    
1925
  def QueryJobs(self, job_ids, fields):
1926
    """Returns the selected fields for the selected job IDs.
1927

1928
    """
1929
    return self.cl.QueryJobs(job_ids, fields)
1930

    
1931

    
1932
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1933
  def __init__(self, feedback_fn):
1934
    """Initializes this class.
1935

1936
    """
1937
    JobPollReportCbBase.__init__(self)
1938

    
1939
    self.feedback_fn = feedback_fn
1940

    
1941
    assert callable(feedback_fn)
1942

    
1943
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1944
    """Handles a log message.
1945

1946
    """
1947
    self.feedback_fn((timestamp, log_type, log_msg))
1948

    
1949
  def ReportNotChanged(self, job_id, status):
1950
    """Called if a job hasn't changed in a while.
1951

1952
    """
1953
    # Ignore
1954

    
1955

    
1956
class StdioJobPollReportCb(JobPollReportCbBase):
1957
  def __init__(self):
1958
    """Initializes this class.
1959

1960
    """
1961
    JobPollReportCbBase.__init__(self)
1962

    
1963
    self.notified_queued = False
1964
    self.notified_waitlock = False
1965

    
1966
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1967
    """Handles a log message.
1968

1969
    """
1970
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1971
             FormatLogMessage(log_type, log_msg))
1972

    
1973
  def ReportNotChanged(self, job_id, status):
1974
    """Called if a job hasn't changed in a while.
1975

1976
    """
1977
    if status is None:
1978
      return
1979

    
1980
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1981
      ToStderr("Job %s is waiting in queue", job_id)
1982
      self.notified_queued = True
1983

    
1984
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1985
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1986
      self.notified_waitlock = True
1987

    
1988

    
1989
def FormatLogMessage(log_type, log_msg):
1990
  """Formats a job message according to its type.
1991

1992
  """
1993
  if log_type != constants.ELOG_MESSAGE:
1994
    log_msg = str(log_msg)
1995

    
1996
  return utils.SafeEncode(log_msg)
1997

    
1998

    
1999
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2000
  """Function to poll for the result of a job.
2001

2002
  @type job_id: job identified
2003
  @param job_id: the job to poll for results
2004
  @type cl: luxi.Client
2005
  @param cl: the luxi client to use for communicating with the master;
2006
             if None, a new client will be created
2007

2008
  """
2009
  if cl is None:
2010
    cl = GetClient()
2011

    
2012
  if reporter is None:
2013
    if feedback_fn:
2014
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2015
    else:
2016
      reporter = StdioJobPollReportCb()
2017
  elif feedback_fn:
2018
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2019

    
2020
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2021

    
2022

    
2023
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2024
  """Legacy function to submit an opcode.
2025

2026
  This is just a simple wrapper over the construction of the processor
2027
  instance. It should be extended to better handle feedback and
2028
  interaction functions.
2029

2030
  """
2031
  if cl is None:
2032
    cl = GetClient()
2033

    
2034
  SetGenericOpcodeOpts([op], opts)
2035

    
2036
  job_id = SendJob([op], cl=cl)
2037

    
2038
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2039
                       reporter=reporter)
2040

    
2041
  return op_results[0]
2042

    
2043

    
2044
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2045
  """Wrapper around SubmitOpCode or SendJob.
2046

2047
  This function will decide, based on the 'opts' parameter, whether to
2048
  submit and wait for the result of the opcode (and return it), or
2049
  whether to just send the job and print its identifier. It is used in
2050
  order to simplify the implementation of the '--submit' option.
2051

2052
  It will also process the opcodes if we're sending them via SendJob
2053
  (otherwise SubmitOpCode does it).
2054

2055
  """
2056
  if opts and opts.submit_only:
2057
    job = [op]
2058
    SetGenericOpcodeOpts(job, opts)
2059
    job_id = SendJob(job, cl=cl)
2060
    raise JobSubmittedException(job_id)
2061
  else:
2062
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2063

    
2064

    
2065
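

# Illustrative sketch, not part of the original module: a typical command
# function body built around SubmitOrSend. Under --submit this raises
# JobSubmittedException, which GenericMain turns into a printed job ID;
# otherwise the opcode result is returned directly. "op" stands for an
# already-built opcode.
def _ExampleSubmitOrSend(opts, args, op):
  result = SubmitOrSend(op, opts)
  ToStdout("Operation result: %s", result)
  return 0
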
def SetGenericOpcodeOpts(opcode_list, options):
2066
  """Processor for generic options.
2067

2068
  This function updates the given opcodes based on generic command
2069
  line options (like debug, dry-run, etc.).
2070

2071
  @param opcode_list: list of opcodes
2072
  @param options: command line options or None
2073
  @return: None (in-place modification)
2074

2075
  """
2076
  if not options:
2077
    return
2078
  for op in opcode_list:
2079
    op.debug_level = options.debug
2080
    if hasattr(options, "dry_run"):
2081
      op.dry_run = options.dry_run
2082
    if getattr(options, "priority", None) is not None:
2083
      op.priority = _PRIONAME_TO_VALUE[options.priority]
2084

    
2085

    
2086
def GetClient(query=False):
  """Connects to a luxi socket and returns a client.
2088

2089
  @type query: boolean
2090
  @param query: this signifies that the client will only be
2091
      used for queries; if the build-time parameter
2092
      enable-split-queries is enabled, then the client will be
2093
      connected to the query socket instead of the masterd socket
2094

2095
  """
2096
  if query and constants.ENABLE_SPLIT_QUERY:
2097
    address = pathutils.QUERY_SOCKET
2098
  else:
2099
    address = None
2100
  # TODO: Cache object?
2101
  try:
2102
    client = luxi.Client(address=address)
2103
  except luxi.NoMasterError:
2104
    ss = ssconf.SimpleStore()
2105

    
2106
    # Try to read ssconf file
2107
    try:
2108
      ss.GetMasterNode()
2109
    except errors.ConfigurationError:
2110
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2111
                                 " not part of a cluster",
2112
                                 errors.ECODE_INVAL)
2113

    
2114
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2115
    if master != myself:
2116
      raise errors.OpPrereqError("This is not the master node, please connect"
2117
                                 " to node '%s' and rerun the command" %
2118
                                 master, errors.ECODE_INVAL)
2119
    raise
2120
  return client
2121

    
2122

    
2123
def FormatError(err):
2124
  """Return a formatted error message for a given error.
2125

2126
  This function takes an exception instance and returns a tuple
2127
  consisting of two values: first, the recommended exit code, and
2128
  second, a string describing the error message (not
2129
  newline-terminated).
2130

2131
  """
2132
  retcode = 1
2133
  obuf = StringIO()
2134
  msg = str(err)
2135
  if isinstance(err, errors.ConfigurationError):
2136
    txt = "Corrupt configuration file: %s" % msg
2137
    logging.error(txt)
2138
    obuf.write(txt + "\n")
2139
    obuf.write("Aborting.")
2140
    retcode = 2
2141
  elif isinstance(err, errors.HooksAbort):
2142
    obuf.write("Failure: hooks execution failed:\n")
2143
    for node, script, out in err.args[0]:
2144
      if out:
2145
        obuf.write("  node: %s, script: %s, output: %s\n" %
2146
                   (node, script, out))
2147
      else:
2148
        obuf.write("  node: %s, script: %s (no output)\n" %
2149
                   (node, script))
2150
  elif isinstance(err, errors.HooksFailure):
2151
    obuf.write("Failure: hooks general failure: %s" % msg)
2152
  elif isinstance(err, errors.ResolverError):
2153
    this_host = netutils.Hostname.GetSysName()
2154
    if err.args[0] == this_host:
2155
      msg = "Failure: can't resolve my own hostname ('%s')"
2156
    else:
2157
      msg = "Failure: can't resolve hostname '%s'"
2158
    obuf.write(msg % err.args[0])
2159
  elif isinstance(err, errors.OpPrereqError):
2160
    if len(err.args) == 2:
2161
      obuf.write("Failure: prerequisites not met for this"
2162
                 " operation:\nerror type: %s, error details:\n%s" %
2163
                 (err.args[1], err.args[0]))
2164
    else:
2165
      obuf.write("Failure: prerequisites not met for this"
2166
                 " operation:\n%s" % msg)
2167
  elif isinstance(err, errors.OpExecError):
2168
    obuf.write("Failure: command execution error:\n%s" % msg)
2169
  elif isinstance(err, errors.TagError):
2170
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2171
  elif isinstance(err, errors.JobQueueDrainError):
2172
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2173
               " accept new requests\n")
2174
  elif isinstance(err, errors.JobQueueFull):
2175
    obuf.write("Failure: the job queue is full and doesn't accept new"
2176
               " job submissions until old jobs are archived\n")
2177
  elif isinstance(err, errors.TypeEnforcementError):
2178
    obuf.write("Parameter Error: %s" % msg)
2179
  elif isinstance(err, errors.ParameterError):
2180
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2181
  elif isinstance(err, luxi.NoMasterError):
2182
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2183
               " and listening for connections?")
2184
  elif isinstance(err, luxi.TimeoutError):
2185
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2186
               " been submitted and will continue to run even if the call"
2187
               " timed out. Useful commands in this situation are \"gnt-job"
2188
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2189
    obuf.write(msg)
2190
  elif isinstance(err, luxi.PermissionError):
2191
    obuf.write("It seems you don't have permissions to connect to the"
2192
               " master daemon.\nPlease retry as a different user.")
2193
  elif isinstance(err, luxi.ProtocolError):
2194
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2195
               "%s" % msg)
2196
  elif isinstance(err, errors.JobLost):
2197
    obuf.write("Error checking job status: %s" % msg)
2198
  elif isinstance(err, errors.QueryFilterParseError):
2199
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2200
    obuf.write("\n".join(err.GetDetails()))
2201
  elif isinstance(err, errors.GenericError):
2202
    obuf.write("Unhandled Ganeti error: %s" % msg)
2203
  elif isinstance(err, JobSubmittedException):
2204
    obuf.write("JobID: %s\n" % err.args[0])
2205
    retcode = 0
2206
  else:
2207
    obuf.write("Unhandled exception: %s" % msg)
2208
  return retcode, obuf.getvalue().rstrip("\n")
2209

    
2210

    
2211
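

# Illustrative sketch, not part of the original module: the usual
# FormatError calling pattern, mirroring what GenericMain does below.
def _ExampleHandleError(err):
  retcode, err_msg = FormatError(err)
  ToStderr(err_msg)
  return retcode
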
def GenericMain(commands, override=None, aliases=None,
2212
                env_override=frozenset()):
2213
  """Generic main function for all the gnt-* commands.
2214

2215
  @param commands: a dictionary with a special structure, see the design doc
2216
                   for command line handling.
2217
  @param override: if not None, we expect a dictionary with keys that will
2218
                   override command line options; this can be used to pass
2219
                   options from the scripts to generic functions
2220
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2221
  @param env_override: list of environment names which are allowed to submit
2222
                       default args for commands
2223

2224
  """
2225
  # save the program name and the entire command line for later logging
2226
  if sys.argv:
2227
    binary = os.path.basename(sys.argv[0])
2228
    if not binary:
2229
      binary = sys.argv[0]
2230

    
2231
    if len(sys.argv) >= 2:
2232
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2233
    else:
2234
      logname = binary
2235

    
2236
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2237
  else:
2238
    binary = "<unknown program>"
2239
    cmdline = "<unknown>"
2240

    
2241
  if aliases is None:
2242
    aliases = {}
2243

    
2244
  try:
2245
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2246
  except errors.ParameterError, err:
2247
    result, err_msg = FormatError(err)
2248
    ToStderr(err_msg)
2249
    return 1
2250

    
2251
  if func is None: # parse error
2252
    return 1
2253

    
2254
  if override is not None:
2255
    for key, val in override.iteritems():
2256
      setattr(options, key, val)
2257

    
2258
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2259
                     stderr_logging=True)
2260

    
2261
  logging.info("Command line: %s", cmdline)
2262

    
2263
  try:
2264
    result = func(options, args)
2265
  except (errors.GenericError, luxi.ProtocolError,
2266
          JobSubmittedException), err:
2267
    result, err_msg = FormatError(err)
2268
    logging.exception("Error during command processing")
2269
    ToStderr(err_msg)
2270
  except KeyboardInterrupt:
2271
    result = constants.EXIT_FAILURE
2272
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2273
             " might have been submitted and"
2274
             " will continue to run in the background.")
2275
  except IOError, err:
2276
    if err.errno == errno.EPIPE:
2277
      # our terminal went away, we'll exit
2278
      sys.exit(constants.EXIT_FAILURE)
2279
    else:
2280
      raise
2281

    
2282
  return result
2283

    
2284

    
2285
def ParseNicOption(optvalue):
2286
  """Parses the value of the --net option(s).
2287

2288
  """
2289
  try:
2290
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2291
  except (TypeError, ValueError), err:
2292
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2293
                               errors.ECODE_INVAL)
2294

    
2295
  nics = [{}] * nic_max
2296
  for nidx, ndict in optvalue:
2297
    nidx = int(nidx)
2298

    
2299
    if not isinstance(ndict, dict):
2300
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2301
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2302

    
2303
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2304

    
2305
    nics[nidx] = ndict
2306

    
2307
  return nics
2308

    
2309

    
2310
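

# Illustrative sketch, not part of the original module: ParseNicOption turns
# parsed --net values into a dense list indexed by NIC number; the parameter
# values shown here are made up.
def _ExampleParseNicOption():
  optvalue = [("0", {"link": "br0"}), ("2", {"mode": "routed"})]
  # NICs 0..2 are created; index 1 stays an empty (all-defaults) dict
  return ParseNicOption(optvalue)
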
def GenericInstanceCreate(mode, opts, args):
2311
  """Add an instance to the cluster via either creation or import.
2312

2313
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2314
  @param opts: the command line options selected by the user
2315
  @type args: list
2316
  @param args: should contain only one element, the new instance name
2317
  @rtype: int
2318
  @return: the desired exit code
2319

2320
  """
2321
  instance = args[0]
2322

    
2323
  (pnode, snode) = SplitNodeOption(opts.node)
2324

    
2325
  hypervisor = None
2326
  hvparams = {}
2327
  if opts.hypervisor:
2328
    hypervisor, hvparams = opts.hypervisor
2329

    
2330
  if opts.nics:
2331
    nics = ParseNicOption(opts.nics)
2332
  elif opts.no_nics:
2333
    # no nics
2334
    nics = []
2335
  elif mode == constants.INSTANCE_CREATE:
2336
    # default of one nic, all auto
2337
    nics = [{}]
2338
  else:
2339
    # mode == import
2340
    nics = []
2341

    
2342
  if opts.disk_template == constants.DT_DISKLESS:
2343
    if opts.disks or opts.sd_size is not None:
2344
      raise errors.OpPrereqError("Diskless instance but disk"
2345
                                 " information passed", errors.ECODE_INVAL)
2346
    disks = []
2347
  else:
2348
    if (not opts.disks and not opts.sd_size
2349
        and mode == constants.INSTANCE_CREATE):
2350
      raise errors.OpPrereqError("No disk information specified",
2351
                                 errors.ECODE_INVAL)
2352
    if opts.disks and opts.sd_size is not None:
2353
      raise errors.OpPrereqError("Please use either the '--disk' or"
2354
                                 " '-s' option", errors.ECODE_INVAL)
2355
    if opts.sd_size is not None:
2356
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2357

    
2358
    if opts.disks:
2359
      try:
2360
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2361
      except ValueError, err:
2362
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2363
                                   errors.ECODE_INVAL)
2364
      disks = [{}] * disk_max
2365
    else:
2366
      disks = []
2367
    for didx, ddict in opts.disks:
2368
      didx = int(didx)
2369
      if not isinstance(ddict, dict):
2370
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2371
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2372
      elif constants.IDISK_SIZE in ddict:
2373
        if constants.IDISK_ADOPT in ddict:
2374
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2375
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2376
        try:
2377
          ddict[constants.IDISK_SIZE] = \
2378
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2379
        except ValueError, err:
2380
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2381
                                     (didx, err), errors.ECODE_INVAL)
2382
      elif constants.IDISK_ADOPT in ddict:
2383
        if mode == constants.INSTANCE_IMPORT:
2384
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2385
                                     " import", errors.ECODE_INVAL)
2386
        ddict[constants.IDISK_SIZE] = 0
2387
      else:
2388
        raise errors.OpPrereqError("Missing size or adoption source for"
2389
                                   " disk %d" % didx, errors.ECODE_INVAL)
2390
      disks[didx] = ddict
2391

    
2392
  if opts.tags is not None:
2393
    tags = opts.tags.split(",")
2394
  else:
2395
    tags = []
2396

    
2397
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2398
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2399

    
2400
  if mode == constants.INSTANCE_CREATE:
2401
    start = opts.start
2402
    os_type = opts.os
2403
    force_variant = opts.force_variant
2404
    src_node = None
2405
    src_path = None
2406
    no_install = opts.no_install
2407
    identify_defaults = False
2408
  elif mode == constants.INSTANCE_IMPORT:
2409
    start = False
2410
    os_type = None
2411
    force_variant = False
2412
    src_node = opts.src_node
2413
    src_path = opts.src_dir
2414
    no_install = None
2415
    identify_defaults = opts.identify_defaults
2416
  else:
2417
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2418

    
2419
  op = opcodes.OpInstanceCreate(instance_name=instance,
2420
                                disks=disks,
2421
                                disk_template=opts.disk_template,
2422
                                nics=nics,
2423
                                pnode=pnode, snode=snode,
2424
                                ip_check=opts.ip_check,
2425
                                name_check=opts.name_check,
2426
                                wait_for_sync=opts.wait_for_sync,
2427
                                file_storage_dir=opts.file_storage_dir,
2428
                                file_driver=opts.file_driver,
2429
                                iallocator=opts.iallocator,
2430
                                hypervisor=hypervisor,
2431
                                hvparams=hvparams,
2432
                                beparams=opts.beparams,
2433
                                osparams=opts.osparams,
2434
                                mode=mode,
2435
                                start=start,
2436
                                os_type=os_type,
2437
                                force_variant=force_variant,
2438
                                src_node=src_node,
2439
                                src_path=src_path,
2440
                                tags=tags,
2441
                                no_install=no_install,
2442
                                identify_defaults=identify_defaults,
2443
                                ignore_ipolicy=opts.ignore_ipolicy)
2444

    
2445
  SubmitOrSend(op, opts)
2446
  return 0
2447

    
2448

    
2449
class _RunWhileClusterStoppedHelper:
2450
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2451

2452
  """
2453
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2454
    """Initializes this class.
2455

2456
    @type feedback_fn: callable
2457
    @param feedback_fn: Feedback function
2458
    @type cluster_name: string
2459
    @param cluster_name: Cluster name
2460
    @type master_node: string
2461
    @param master_node: Master node name
2462
    @type online_nodes: list
2463
    @param online_nodes: List of names of online nodes
2464

2465
    """
2466
    self.feedback_fn = feedback_fn
2467
    self.cluster_name = cluster_name
2468
    self.master_node = master_node
2469
    self.online_nodes = online_nodes
2470

    
2471
    self.ssh = ssh.SshRunner(self.cluster_name)
2472

    
2473
    self.nonmaster_nodes = [name for name in online_nodes
2474
                            if name != master_node]
2475

    
2476
    assert self.master_node not in self.nonmaster_nodes
2477

    
2478
  def _RunCmd(self, node_name, cmd):
2479
    """Runs a command on the local or a remote machine.
2480

2481
    @type node_name: string
2482
    @param node_name: Machine name
2483
    @type cmd: list
2484
    @param cmd: Command
2485

2486
    """
2487
    if node_name is None or node_name == self.master_node:
2488
      # No need to use SSH
2489
      result = utils.RunCmd(cmd)
2490
    else:
2491
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2492
                            utils.ShellQuoteArgs(cmd))
2493

    
2494
    if result.failed:
2495
      errmsg = ["Failed to run command %s" % result.cmd]
2496
      if node_name:
2497
        errmsg.append("on node %s" % node_name)
2498
      errmsg.append(": exitcode %s and error %s" %
2499
                    (result.exit_code, result.output))
2500
      raise errors.OpExecError(" ".join(errmsg))
2501

    
2502
  def Call(self, fn, *args):
2503
    """Call function while all daemons are stopped.
2504

2505
    @type fn: callable
2506
    @param fn: Function to be called
2507

2508
    """
2509
    # Pause watcher by acquiring an exclusive lock on watcher state file
2510
    self.feedback_fn("Blocking watcher")
2511
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2512
    try:
2513
      # TODO: Currently, this just blocks. There's no timeout.
2514
      # TODO: Should it be a shared lock?
2515
      watcher_block.Exclusive(blocking=True)
2516

    
2517
      # Stop master daemons, so that no new jobs can come in and all running
2518
      # ones are finished
2519
      self.feedback_fn("Stopping master daemons")
2520
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2521
      try:
2522
        # Stop daemons on all nodes
2523
        for node_name in self.online_nodes:
2524
          self.feedback_fn("Stopping daemons on %s" % node_name)
2525
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2526

    
2527
        # All daemons are shut down now
2528
        try:
2529
          return fn(self, *args)
2530
        except Exception, err:
2531
          _, errmsg = FormatError(err)
2532
          logging.exception("Caught exception")
2533
          self.feedback_fn(errmsg)
2534
          raise
2535
      finally:
2536
        # Start cluster again, master node last
2537
        for node_name in self.nonmaster_nodes + [self.master_node]:
2538
          self.feedback_fn("Starting daemons on %s" % node_name)
2539
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2540
    finally:
2541
      # Resume watcher
2542
      watcher_block.Close()
2543

    
2544

    
2545
def RunWhileClusterStopped(feedback_fn, fn, *args):
2546
  """Calls a function while all cluster daemons are stopped.
2547

2548
  @type feedback_fn: callable
2549
  @param feedback_fn: Feedback function
2550
  @type fn: callable
2551
  @param fn: Function to be called when daemons are stopped
2552

2553
  """
2554
  feedback_fn("Gathering cluster information")
2555

    
2556
  # This ensures we're running on the master daemon
2557
  cl = GetClient()
2558

    
2559
  (cluster_name, master_node) = \
2560
    cl.QueryConfigValues(["cluster_name", "master_node"])
2561

    
2562
  online_nodes = GetOnlineNodes([], cl=cl)
2563

    
2564
  # Don't keep a reference to the client. The master daemon will go away.
2565
  del cl
2566

    
2567
  assert master_node in online_nodes
2568

    
2569
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2570
                                       online_nodes).Call(fn, *args)
2571

    
2572

    
2573
def GenerateTable(headers, fields, separator, data,
2574
                  numfields=None, unitfields=None,
2575
                  units=None):
2576
  """Prints a table with headers and different fields.
2577

2578
  @type headers: dict
2579
  @param headers: dictionary mapping field names to headers for
2580
      the table
2581
  @type fields: list
2582
  @param fields: the field names corresponding to each row in
2583
      the data field
2584
  @param separator: the separator to be used; if this is None,
2585
      the default 'smart' algorithm is used which computes optimal
2586
      field width, otherwise just the separator is used between
2587
      each field
2588
  @type data: list
2589
  @param data: a list of lists, each sublist being one row to be output
2590
  @type numfields: list
2591
  @param numfields: a list with the fields that hold numeric
2592
      values and thus should be right-aligned
2593
  @type unitfields: list
2594
  @param unitfields: a list with the fields that hold numeric
2595
      values that should be formatted with the units field
2596
  @type units: string or None
2597
  @param units: the units we should use for formatting, or None for
2598
      automatic choice (human-readable for non-separator usage, otherwise
2599
      megabytes); this is a one-letter string
2600

2601
  """
2602
  if units is None:
2603
    if separator:
2604
      units = "m"
2605
    else:
2606
      units = "h"
2607

    
2608
  if numfields is None:
2609
    numfields = []
2610
  if unitfields is None:
2611
    unitfields = []
2612

    
2613
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2614
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2615

    
2616
  format_fields = []
2617
  for field in fields:
2618
    if headers and field not in headers:
2619
      # TODO: handle better unknown fields (either revert to old
2620
      # style of raising exception, or deal more intelligently with
2621
      # variable fields)
2622
      headers[field] = field
2623
    if separator is not None:
2624
      format_fields.append("%s")
2625
    elif numfields.Matches(field):
2626
      format_fields.append("%*s")
2627
    else:
2628
      format_fields.append("%-*s")
2629

    
2630
  if separator is None:
2631
    mlens = [0 for name in fields]
2632
    format_str = " ".join(format_fields)
2633
  else:
2634
    format_str = separator.replace("%", "%%").join(format_fields)
2635

    
2636
  for row in data:
2637
    if row is None:
2638
      continue
2639
    for idx, val in enumerate(row):
2640
      if unitfields.Matches(fields[idx]):
2641
        try:
2642
          val = int(val)
2643
        except (TypeError, ValueError):
2644
          pass
2645
        else:
2646
          val = row[idx] = utils.FormatUnit(val, units)
2647
      val = row[idx] = str(val)
2648
      if separator is None:
2649
        mlens[idx] = max(mlens[idx], len(val))
2650

    
2651
  result = []
2652
  if headers:
2653
    args = []
2654
    for idx, name in enumerate(fields):
2655
      hdr = headers[name]
2656
      if separator is None:
2657
        mlens[idx] = max(mlens[idx], len(hdr))
2658
        args.append(mlens[idx])
2659
      args.append(hdr)
2660
    result.append(format_str % tuple(args))
2661

    
2662
  if separator is None:
2663
    assert len(mlens) == len(fields)
2664

    
2665
    if fields and not numfields.Matches(fields[-1]):
2666
      mlens[-1] = 0
2667

    
2668
  for line in data:
2669
    args = []
2670
    if line is None:
2671
      line = ["-" for _ in fields]
2672
    for idx in range(len(fields)):
2673
      if separator is None:
2674
        args.append(mlens[idx])
2675
      args.append(line[idx])
2676
    result.append(format_str % tuple(args))
2677

    
2678
  return result
2679

    
2680

    
2681
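

# Illustrative sketch, not part of the original module: rendering a small
# two-column table with GenerateTable; the field names and data are made up.
def _ExampleGenerateTable():
  headers = {"name": "Node", "dfree": "DFree"}
  fields = ["name", "dfree"]
  data = [["node1.example.com", 102400], ["node2.example.com", 51200]]
  for line in GenerateTable(headers, fields, None, data,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout(line)
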
def _FormatBool(value):
2682
  """Formats a boolean value as a string.
2683

2684
  """
2685
  if value:
2686
    return "Y"
2687
  return "N"
2688

    
2689

    
2690
#: Default formatting for query results; (callback, align right)
2691
_DEFAULT_FORMAT_QUERY = {
2692
  constants.QFT_TEXT: (str, False),
2693
  constants.QFT_BOOL: (_FormatBool, False),
2694
  constants.QFT_NUMBER: (str, True),
2695
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2696
  constants.QFT_OTHER: (str, False),
2697
  constants.QFT_UNKNOWN: (str, False),
2698
  }
2699

    
2700

    
2701
def _GetColumnFormatter(fdef, override, unit):
2702
  """Returns formatting function for a field.
2703

2704
  @type fdef: L{objects.QueryFieldDefinition}
2705
  @type override: dict
2706
  @param override: Dictionary for overriding field formatting functions,
2707
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2708
  @type unit: string
2709
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2710
  @rtype: tuple; (callable, bool)
2711
  @return: Returns the function to format a value (takes one parameter) and a
2712
    boolean for aligning the value on the right-hand side
2713

2714
  """
2715
  fmt = override.get(fdef.name, None)
2716
  if fmt is not None:
2717
    return fmt
2718

    
2719
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2720

    
2721
  if fdef.kind == constants.QFT_UNIT:
2722
    # Can't keep this information in the static dictionary
2723
    return (lambda value: utils.FormatUnit(value, unit), True)
2724

    
2725
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2726
  if fmt is not None:
2727
    return fmt
2728

    
2729
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2730

    
2731

    
2732
class _QueryColumnFormatter:
2733
  """Callable class for formatting fields of a query.
2734

2735
  """
2736
  def __init__(self, fn, status_fn, verbose):
2737
    """Initializes this class.
2738

2739
    @type fn: callable
2740
    @param fn: Formatting function
2741
    @type status_fn: callable
2742
    @param status_fn: Function to report fields' status
2743
    @type verbose: boolean
2744
    @param verbose: whether to use verbose field descriptions or not
2745

2746
    """
2747
    self._fn = fn
2748
    self._status_fn = status_fn
2749
    self._verbose = verbose
2750

    
2751
  def __call__(self, data):
2752
    """Returns a field's string representation.
2753

2754
    """
2755
    (status, value) = data
2756

    
2757
    # Report status
2758
    self._status_fn(status)
2759

    
2760
    if status == constants.RS_NORMAL:
2761
      return self._fn(value)
2762

    
2763
    assert value is None, \
2764
           "Found value %r for abnormal status %s" % (value, status)
2765

    
2766
    return FormatResultError(status, self._verbose)
2767

    
2768

    
2769
def FormatResultError(status, verbose):
2770
  """Formats result status other than L{constants.RS_NORMAL}.
2771

2772
  @param status: The result status
2773
  @type verbose: boolean
2774
  @param verbose: Whether to return the verbose text
2775
  @return: Text of result status
2776

2777
  """
2778
  assert status != constants.RS_NORMAL, \
2779
         "FormatResultError called with status equal to constants.RS_NORMAL"
2780
  try:
2781
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2782
  except KeyError:
2783
    raise NotImplementedError("Unknown status %s" % status)
2784
  else:
2785
    if verbose:
2786
      return verbose_text
2787
    return normal_text
2788

    
2789

    
2790
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2791
                      header=False, verbose=False):
2792
  """Formats data in L{objects.QueryResponse}.
2793

2794
  @type result: L{objects.QueryResponse}
2795
  @param result: result of query operation
2796
  @type unit: string
2797
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2798
    see L{utils.text.FormatUnit}
2799
  @type format_override: dict
2800
  @param format_override: Dictionary for overriding field formatting functions,
2801
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2802
  @type separator: string or None
2803
  @param separator: String used to separate fields
2804
  @type header: bool
2805
  @param header: Whether to output header row
2806
  @type verbose: boolean
2807
  @param verbose: whether to use verbose field descriptions or not
2808

2809
  """
2810
  if unit is None:
2811
    if separator:
2812
      unit = "m"
2813
    else:
2814
      unit = "h"
2815

    
2816
  if format_override is None:
2817
    format_override = {}
2818

    
2819
  stats = dict.fromkeys(constants.RS_ALL, 0)
2820

    
2821
  def _RecordStatus(status):
2822
    if status in stats:
2823
      stats[status] += 1
2824

    
2825
  columns = []
2826
  for fdef in result.fields:
2827
    assert fdef.title and fdef.name
2828
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2829
    columns.append(TableColumn(fdef.title,
2830
                               _QueryColumnFormatter(fn, _RecordStatus,
2831
                                                     verbose),
2832
                               align_right))
2833

    
2834
  table = FormatTable(result.data, columns, header, separator)
2835

    
2836
  # Collect statistics
2837
  assert len(stats) == len(constants.RS_ALL)
2838
  assert compat.all(count >= 0 for count in stats.values())
2839

    
2840
  # Determine overall status. If there was no data, unknown fields must be
2841
  # detected via the field definitions.
2842
  if (stats[constants.RS_UNKNOWN] or
2843
      (not result.data and _GetUnknownFields(result.fields))):
2844
    status = QR_UNKNOWN
2845
  elif compat.any(count > 0 for key, count in stats.items()
2846
                  if key != constants.RS_NORMAL):
2847
    status = QR_INCOMPLETE
2848
  else:
2849
    status = QR_NORMAL
2850

    
2851
  return (status, table)
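# Illustrative sketch (not part of the original module): building a tiny
# L{objects.QueryResponse} by hand and rendering it. The helper name and the
# sample fields/rows are hypothetical; the overall status comes back as
# QR_INCOMPLETE because one value is marked unavailable.
def _ExampleFormatQueryResult():
  fields = [
    objects.QueryFieldDefinition(name="name", title="Name",
                                 kind=constants.QFT_TEXT, doc="Item name"),
    objects.QueryFieldDefinition(name="size", title="Size",
                                 kind=constants.QFT_UNIT, doc="Item size"),
    ]
  data = [
    [(constants.RS_NORMAL, "disk0"), (constants.RS_NORMAL, 1024)],
    [(constants.RS_NORMAL, "disk1"), (constants.RS_UNAVAIL, None)],
    ]
  response = objects.QueryResponse(fields=fields, data=data)
  (status, lines) = FormatQueryResult(response, unit="h", header=True)
  for line in lines:
    ToStdout(line)
  return status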
2852

    
2853

    
2854
def _GetUnknownFields(fdefs):
2855
  """Returns list of unknown fields included in C{fdefs}.
2856

2857
  @type fdefs: list of L{objects.QueryFieldDefinition}
2858

2859
  """
2860
  return [fdef for fdef in fdefs
2861
          if fdef.kind == constants.QFT_UNKNOWN]
2862

    
2863

    
2864
def _WarnUnknownFields(fdefs):
2865
  """Prints a warning to stderr if a query included unknown fields.
2866

2867
  @type fdefs: list of L{objects.QueryFieldDefinition}
2868

2869
  """
2870
  unknown = _GetUnknownFields(fdefs)
2871
  if unknown:
2872
    ToStderr("Warning: Queried for unknown fields %s",
2873
             utils.CommaJoin(fdef.name for fdef in unknown))
2874
    return True
2875

    
2876
  return False
2877

    
2878

    
2879
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2880
                format_override=None, verbose=False, force_filter=False,
2881
                namefield=None, qfilter=None, isnumeric=False):
2882
  """Generic implementation for listing all items of a resource.
2883

2884
  @param resource: One of L{constants.QR_VIA_LUXI}
2885
  @type fields: list of strings
2886
  @param fields: List of fields to query for
2887
  @type names: list of strings
2888
  @param names: Names of items to query for
2889
  @type unit: string or None
2890
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2891
    None for automatic choice (human-readable for non-separator usage,
2892
    otherwise megabytes); this is a one-letter string
2893
  @type separator: string or None
2894
  @param separator: String used to separate fields
2895
  @type header: bool
2896
  @param header: Whether to show header row
2897
  @type force_filter: bool
2898
  @param force_filter: Whether to always treat names as filter
2899
  @type format_override: dict
2900
  @param format_override: Dictionary for overriding field formatting functions,
2901
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2902
  @type verbose: boolean
2903
  @param verbose: whether to use verbose field descriptions or not
2904
  @type namefield: string
2905
  @param namefield: Name of field to use for simple filters (see
2906
    L{qlang.MakeFilter} for details)
2907
  @type qfilter: list or None
2908
  @param qfilter: Query filter (in addition to names)
2909
  @type isnumeric: bool
2910
  @param isnumeric: Whether the namefield's type is numeric, and therefore
2911
    any simple filters built by namefield should use integer values to
2912
    reflect that
2913

2914
  """
2915
  if not names:
2916
    names = None
2917

    
2918
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
2919
                                isnumeric=isnumeric)
2920

    
2921
  if qfilter is None:
2922
    qfilter = namefilter
2923
  elif namefilter is not None:
2924
    qfilter = [qlang.OP_AND, namefilter, qfilter]
2925

    
2926
  if cl is None:
2927
    cl = GetClient()
2928

    
2929
  response = cl.Query(resource, fields, qfilter)
2930

    
2931
  found_unknown = _WarnUnknownFields(response.fields)
2932

    
2933
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2934
                                     header=header,
2935
                                     format_override=format_override,
2936
                                     verbose=verbose)
2937

    
2938
  for line in data:
2939
    ToStdout(line)
2940

    
2941
  assert ((found_unknown and status == QR_UNKNOWN) or
2942
          (not found_unknown and status != QR_UNKNOWN))
2943

    
2944
  if status == QR_UNKNOWN:
2945
    return constants.EXIT_UNKNOWN_FIELD
2946

    
2947
  # TODO: Should the list command fail if not all data could be collected?
2948
  return constants.EXIT_SUCCESS
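# Illustrative sketch (not part of the original module, requires a reachable
# master daemon): roughly how a "list"-style command would call GenericList
# with its positional arguments. The helper name and the chosen node fields
# are examples only.
def _ExampleGenericList(args):
  return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
                     None, None, True, namefield="name")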
2949

    
2950

    
2951
def GenericListFields(resource, fields, separator, header, cl=None):
2952
  """Generic implementation for listing fields for a resource.
2953

2954
  @param resource: One of L{constants.QR_VIA_LUXI}
2955
  @type fields: list of strings
2956
  @param fields: List of fields to query for
2957
  @type separator: string or None
2958
  @param separator: String used to separate fields
2959
  @type header: bool
2960
  @param header: Whether to show header row
2961

2962
  """
2963
  if cl is None:
2964
    cl = GetClient()
2965

    
2966
  if not fields:
2967
    fields = None
2968

    
2969
  response = cl.QueryFields(resource, fields)
2970

    
2971
  found_unknown = _WarnUnknownFields(response.fields)
2972

    
2973
  columns = [
2974
    TableColumn("Name", str, False),
2975
    TableColumn("Title", str, False),
2976
    TableColumn("Description", str, False),
2977
    ]
2978

    
2979
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2980

    
2981
  for line in FormatTable(rows, columns, header, separator):
2982
    ToStdout(line)
2983

    
2984
  if found_unknown:
2985
    return constants.EXIT_UNKNOWN_FIELD
2986

    
2987
  return constants.EXIT_SUCCESS
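# Illustrative sketch (not part of the original module, requires a reachable
# master daemon): listing all queryable node fields, roughly what a
# "list-fields"-style command does. The helper name is hypothetical.
def _ExampleGenericListFields():
  return GenericListFields(constants.QR_NODE, [], None, True)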
2988

    
2989

    
2990
class TableColumn:
2991
  """Describes a column for L{FormatTable}.
2992

2993
  """
2994
  def __init__(self, title, fn, align_right):
2995
    """Initializes this class.
2996

2997
    @type title: string
2998
    @param title: Column title
2999
    @type fn: callable
3000
    @param fn: Formatting function
3001
    @type align_right: bool
3002
    @param align_right: Whether to align values on the right-hand side
3003

3004
    """
3005
    self.title = title
3006
    self.format = fn
3007
    self.align_right = align_right
3008

    
3009

    
3010
def _GetColFormatString(width, align_right):
3011
  """Returns the format string for a field.
3012

3013
  """
3014
  if align_right:
3015
    sign = ""
3016
  else:
3017
    sign = "-"
3018

    
3019
  return "%%%s%ss" % (sign, width)
3020

    
3021

    
3022
def FormatTable(rows, columns, header, separator):
3023
  """Formats data as a table.
3024

3025
  @type rows: list of lists
3026
  @param rows: Row data, one list per row
3027
  @type columns: list of L{TableColumn}
3028
  @param columns: Column descriptions
3029
  @type header: bool
3030
  @param header: Whether to show header row
3031
  @type separator: string or None
3032
  @param separator: String used to separate columns
3033

3034
  """
3035
  if header:
3036
    data = [[col.title for col in columns]]
3037
    colwidth = [len(col.title) for col in columns]
3038
  else:
3039
    data = []
3040
    colwidth = [0 for _ in columns]
3041

    
3042
  # Format row data
3043
  for row in rows:
3044
    assert len(row) == len(columns)
3045

    
3046
    formatted = [col.format(value) for value, col in zip(row, columns)]
3047

    
3048
    if separator is None:
3049
      # Update column widths
3050
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3051
        # Modifying a list's items while iterating is fine
3052
        colwidth[idx] = max(oldwidth, len(value))
3053

    
3054
    data.append(formatted)
3055

    
3056
  if separator is not None:
3057
    # Return early if a separator is used
3058
    return [separator.join(row) for row in data]
3059

    
3060
  if columns and not columns[-1].align_right:
3061
    # Avoid unnecessary spaces at end of line
3062
    colwidth[-1] = 0
3063

    
3064
  # Build format string
3065
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3066
                  for col, width in zip(columns, colwidth)])
3067

    
3068
  return [fmt % tuple(row) for row in data]
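# Illustrative sketch (not part of the original module): combining TableColumn
# and FormatTable directly. The helper name and the sample rows are
# hypothetical; with separator=None the columns are padded to their widest
# value, with a separator string they are simply joined.
def _ExampleFormatTable():
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", str, True),
    ]
  rows = [
    ["disk0", 1024],
    ["disk1", 512],
    ]
  return FormatTable(rows, columns, True, None)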
3069

    
3070

    
3071
def FormatTimestamp(ts):
3072
  """Formats a given timestamp.
3073

3074
  @type ts: timestamp
3075
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3076

3077
  @rtype: string
3078
  @return: a string with the formatted timestamp
3079

3080
  """
3081
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3082
    return "?"
3083

    
3084
  (sec, usecs) = ts
3085
  return utils.FormatTime(sec, usecs=usecs)
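# Illustrative sketch (not part of the original module): timestamps are
# (seconds, microseconds) pairs; anything else is rendered as "?". The helper
# name is hypothetical.
def _ExampleFormatTimestamp():
  return (FormatTimestamp((1234567890, 0)), FormatTimestamp("bogus"))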
3086

    
3087

    
3088
def ParseTimespec(value):
3089
  """Parse a time specification.
3090

3091
  The following suffixes will be recognized:
3092

3093
    - s: seconds
3094
    - m: minutes
3095
    - h: hours
3096
    - d: days
3097
    - w: weeks
3098

3099
  Without any suffix, the value will be taken to be in seconds.
3100

3101
  """
3102
  value = str(value)
3103
  if not value:
3104
    raise errors.OpPrereqError("Empty time specification passed",
3105
                               errors.ECODE_INVAL)
3106
  suffix_map = {
3107
    "s": 1,
3108
    "m": 60,
3109
    "h": 3600,
3110
    "d": 86400,
3111
    "w": 604800,
3112
    }
3113
  if value[-1] not in suffix_map:
3114
    try:
3115
      value = int(value)
3116
    except (TypeError, ValueError):
3117
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3118
                                 errors.ECODE_INVAL)
3119
  else:
3120
    multiplier = suffix_map[value[-1]]
3121
    value = value[:-1]
3122
    if not value: # no data left after stripping the suffix
3123
      raise errors.OpPrereqError("Invalid time specification (only"
3124
                                 " suffix passed)", errors.ECODE_INVAL)
3125
    try:
3126
      value = int(value) * multiplier
3127
    except (TypeError, ValueError):
3128
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3129
                                 errors.ECODE_INVAL)
3130
  return value
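# Illustrative sketch (not part of the original module): a few accepted time
# specifications and their values in seconds. The helper name is hypothetical;
# invalid input raises errors.OpPrereqError.
def _ExampleParseTimespec():
  assert ParseTimespec("30") == 30  # no suffix: plain seconds
  assert ParseTimespec("1h") == 3600
  assert ParseTimespec("2w") == 2 * 604800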
3131

    
3132

    
3133
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3134
                   filter_master=False, nodegroup=None):
3135
  """Returns the names of online nodes.
3136

3137
  This function will also log a warning on stderr with the names of
3138
  the offline nodes that are skipped.
3139

3140
  @param nodes: if not empty, use only this subset of nodes (minus the
3141
      offline ones)
3142
  @param cl: if not None, luxi client to use
3143
  @type nowarn: boolean
3144
  @param nowarn: by default, this function will output a note with the
3145
      offline nodes that are skipped; if this parameter is True the
3146
      note is not displayed
3147
  @type secondary_ips: boolean
3148
  @param secondary_ips: if True, return the secondary IPs instead of the
3149
      names, useful for doing network traffic over the replication interface
3150
      (if any)
3151
  @type filter_master: boolean
3152
  @param filter_master: if True, do not return the master node in the list
3153
      (useful in coordination with secondary_ips where we cannot check our
3154
      node name against the list)
3155
  @type nodegroup: string
3156
  @param nodegroup: If set, only return nodes in this node group
3157

3158
  """
3159
  if cl is None:
3160
    cl = GetClient()
3161

    
3162
  qfilter = []
3163

    
3164
  if nodes:
3165
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3166

    
3167
  if nodegroup is not None:
3168
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3169
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3170

    
3171
  if filter_master:
3172
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3173

    
3174
  if qfilter:
3175
    if len(qfilter) > 1:
3176
      final_filter = [qlang.OP_AND] + qfilter
3177
    else:
3178
      assert len(qfilter) == 1
3179
      final_filter = qfilter[0]
3180
  else:
3181
    final_filter = None
3182

    
3183
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3184

    
3185
  def _IsOffline(row):
3186
    (_, (_, offline), _) = row
3187
    return offline
3188

    
3189
  def _GetName(row):
3190
    ((_, name), _, _) = row
3191
    return name
3192

    
3193
  def _GetSip(row):
3194
    (_, _, (_, sip)) = row
3195
    return sip
3196

    
3197
  (offline, online) = compat.partition(result.data, _IsOffline)
3198

    
3199
  if offline and not nowarn:
3200
    ToStderr("Note: skipping offline node(s): %s" %
3201
             utils.CommaJoin(map(_GetName, offline)))
3202

    
3203
  if secondary_ips:
3204
    fn = _GetSip
3205
  else:
3206
    fn = _GetName
3207

    
3208
  return map(fn, online)
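# Illustrative sketch (not part of the original module, requires a reachable
# master daemon): collecting the secondary IPs of all online nodes except the
# master, e.g. before pushing data over the replication network. The helper
# name is hypothetical.
def _ExampleGetOnlineNodes():
  return GetOnlineNodes([], secondary_ips=True, filter_master=True)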
3209

    
3210

    
3211
def _ToStream(stream, txt, *args):
3212
  """Write a message to a stream, bypassing the logging system
3213

3214
  @type stream: file object
3215
  @param stream: the file to which we should write
3216
  @type txt: str
3217
  @param txt: the message
3218

3219
  """
3220
  try:
3221
    if args:
3222
      args = tuple(args)
3223
      stream.write(txt % args)
3224
    else:
3225
      stream.write(txt)
3226
    stream.write("\n")
3227
    stream.flush()
3228
  except IOError, err:
3229
    if err.errno == errno.EPIPE:
3230
      # our terminal went away, we'll exit
3231
      sys.exit(constants.EXIT_FAILURE)
3232
    else:
3233
      raise
3234

    
3235

    
3236
def ToStdout(txt, *args):
3237
  """Write a message to stdout only, bypassing the logging system
3238

3239
  This is just a wrapper over _ToStream.
3240

3241
  @type txt: str
3242
  @param txt: the message
3243

3244
  """
3245
  _ToStream(sys.stdout, txt, *args)
3246

    
3247

    
3248
def ToStderr(txt, *args):
3249
  """Write a message to stderr only, bypassing the logging system
3250

3251
  This is just a wrapper over _ToStream.
3252

3253
  @type txt: str
3254
  @param txt: the message
3255

3256
  """
3257
  _ToStream(sys.stderr, txt, *args)
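# Illustrative sketch (not part of the original module): the message is
# %-formatted only when extra arguments are passed, mirroring the logging
# calls. The helper name and the sample messages are hypothetical.
def _ExampleToStreams():
  ToStdout("Submitted %d job(s)", 3)
  ToStderr("Warning: %s is offline", "node1.example.com")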
3258

    
3259

    
3260
class JobExecutor(object):
3261
  """Class which manages the submission and execution of multiple jobs.
3262

3263
  Note that instances of this class should not be reused between
3264
  GetResults() calls.
3265

3266
  """
3267
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3268
    self.queue = []
3269
    if cl is None:
3270
      cl = GetClient()
3271
    self.cl = cl
3272
    self.verbose = verbose
3273
    self.jobs = []
3274
    self.opts = opts
3275
    self.feedback_fn = feedback_fn
3276
    self._counter = itertools.count()
3277

    
3278
  @staticmethod
3279
  def _IfName(name, fmt):
3280
    """Helper function for formatting name.
3281

3282
    """
3283
    if name:
3284
      return fmt % name
3285

    
3286
    return ""
3287

    
3288
  def QueueJob(self, name, *ops):
3289
    """Record a job for later submit.
3290

3291
    @type name: string
3292
    @param name: a description of the job, used when reporting job progress
3293

3294
    """
3295
    SetGenericOpcodeOpts(ops, self.opts)
3296
    self.queue.append((self._counter.next(), name, ops))
3297

    
3298
  def AddJobId(self, name, status, job_id):
3299
    """Adds a job ID to the internal queue.
3300

3301
    """
3302
    self.jobs.append((self._counter.next(), status, job_id, name))
3303

    
3304
  def SubmitPending(self, each=False):
3305
    """Submit all pending jobs.
3306

3307
    """
3308
    if each:
3309
      results = []
3310
      for (_, _, ops) in self.queue:
3311
        # Unlike SubmitManyJobs, SubmitJob does not return a success status;
3312
        # it raises an exception on failure, so failures are still noticed.
3313
        results.append([True, self.cl.SubmitJob(ops)[0]])
3314
    else:
3315
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3316
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3317
      self.jobs.append((idx, status, data, name))
3318

    
3319
  def _ChooseJob(self):
3320
    """Choose a non-waiting/queued job to poll next.
3321

3322
    """
3323
    assert self.jobs, "_ChooseJob called with empty job list"
3324

    
3325
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3326
                               ["status"])
3327
    assert result
3328

    
3329
    for job_data, status in zip(self.jobs, result):
3330
      if (isinstance(status, list) and status and
3331
          status[0] in (constants.JOB_STATUS_QUEUED,
3332
                        constants.JOB_STATUS_WAITING,
3333
                        constants.JOB_STATUS_CANCELING)):
3334
        # job is still present and waiting
3335
        continue
3336
      # good candidate found (either running job or lost job)
3337
      self.jobs.remove(job_data)
3338
      return job_data
3339

    
3340
    # no job found
3341
    return self.jobs.pop(0)
3342

    
3343
  def GetResults(self):
3344
    """Wait for and return the results of all jobs.
3345

3346
    @rtype: list
3347
    @return: list of tuples (success, job results), in the same order
3348
        as the submitted jobs; if a job has failed, instead of the result
3349
        there will be the error message
3350

3351
    """
3352
    if not self.jobs:
3353
      self.SubmitPending()
3354
    results = []
3355
    if self.verbose:
3356
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3357
      if ok_jobs:
3358
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3359

    
3360
    # first, remove any non-submitted jobs
3361
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3362
    for idx, _, jid, name in failures:
3363
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3364
      results.append((idx, False, jid))
3365

    
3366
    while self.jobs:
3367
      (idx, _, jid, name) = self._ChooseJob()
3368
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3369
      try:
3370
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3371
        success = True
3372
      except errors.JobLost, err:
3373
        _, job_result = FormatError(err)
3374
        ToStderr("Job %s%s has been archived, cannot check its result",
3375
                 jid, self._IfName(name, " for %s"))
3376
        success = False
3377
      except (errors.GenericError, luxi.ProtocolError), err:
3378
        _, job_result = FormatError(err)
3379
        success = False
3380
        # the error message will always be shown, verbose or not
3381
        ToStderr("Job %s%s has failed: %s",
3382
                 jid, self._IfName(name, " for %s"), job_result)
3383

    
3384
      results.append((idx, success, job_result))
3385

    
3386
    # sort based on the index, then drop it
3387
    results.sort()
3388
    results = [i[1:] for i in results]
3389

    
3390
    return results
3391

    
3392
  def WaitOrShow(self, wait):
3393
    """Wait for job results or only print the job IDs.
3394

3395
    @type wait: boolean
3396
    @param wait: whether to wait or not
3397

3398
    """
3399
    if wait:
3400
      return self.GetResults()
3401
    else:
3402
      if not self.jobs:
3403
        self.SubmitPending()
3404
      for _, status, result, name in self.jobs:
3405
        if status:
3406
          ToStdout("%s: %s", result, name)
3407
        else:
3408
          ToStderr("Failure for %s: %s", name, result)
3409
      return [row[1:3] for row in self.jobs]
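# Illustrative sketch (not part of the original module, requires a reachable
# master daemon): submitting one job per opcode and waiting for all results,
# the way the gnt-* commands drive this class. The helper name and the
# "op-%d" job descriptions are hypothetical.
def _ExampleJobExecutor(opcode_list):
  jex = JobExecutor(verbose=True)
  for (idx, op) in enumerate(opcode_list):
    jex.QueueJob("op-%d" % idx, op)
  return jex.GetResults()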
3410

    
3411

    
3412
def FormatParameterDict(buf, param_dict, actual, level=1):
3413
  """Formats a parameter dictionary.
3414

3415
  @type buf: L{StringIO}
3416
  @param buf: the buffer into which to write
3417
  @type param_dict: dict
3418
  @param param_dict: the object's own (explicitly set) parameters
3419
  @type actual: dict
3420
  @param actual: the current parameter set (including defaults)
3421
  @param level: Level of indent
3422

3423
  """
3424
  indent = "  " * level
3425

    
3426
  for key in sorted(actual):
3427
    data = actual[key]
3428
    buf.write("%s- %s:" % (indent, key))
3429

    
3430
    if isinstance(data, dict) and data:
3431
      buf.write("\n")
3432
      FormatParameterDict(buf, param_dict.get(key, {}), data,
3433
                          level=level + 1)
3434
    else:
3435
      val = param_dict.get(key, "default (%s)" % data)
3436
      buf.write(" %s\n" % val)
3437

    
3438

    
3439
def ConfirmOperation(names, list_type, text, extra=""):
3440
  """Ask the user to confirm an operation on a list of list_type.
3441

3442
  This function is used to request confirmation for doing an operation
3443
  on a given list of list_type.
3444

3445
  @type names: list
3446
  @param names: the list of names that we display when
3447
      we ask for confirmation
3448
  @type list_type: str
3449
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3450
  @type text: str
3451
  @param text: the operation that the user should confirm
3452
  @rtype: boolean
3453
  @return: True or False depending on user's confirmation.
3454

3455
  """
3456
  count = len(names)
3457
  msg = ("The %s will operate on %d %s.\n%s"
3458
         "Do you want to continue?" % (text, count, list_type, extra))
3459
  affected = (("\nAffected %s:\n" % list_type) +
3460
              "\n".join(["  %s" % name for name in names]))
3461

    
3462
  choices = [("y", True, "Yes, execute the %s" % text),
3463
             ("n", False, "No, abort the %s" % text)]
3464

    
3465
  if count > 20:
3466
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3467
    question = msg
3468
  else:
3469
    question = msg + affected
3470

    
3471
  choice = AskUser(question, choices)
3472
  if choice == "v":
3473
    choices.pop(1)
3474
    choice = AskUser(msg + affected, choices)
3475
  return choice
3476

    
3477

    
3478
def _MaybeParseUnit(elements):
3479
  """Parses and returns an array of potential values with units.
3480

3481
  """
3482
  parsed = {}
3483
  for k, v in elements.items():
3484
    if v == constants.VALUE_DEFAULT:
3485
      parsed[k] = v
3486
    else:
3487
      parsed[k] = utils.ParseUnit(v)
3488
  return parsed
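# Illustrative sketch (not part of the original module): "default" is passed
# through untouched, every other value goes through utils.ParseUnit and ends
# up in mebibytes. The helper name is hypothetical.
def _ExampleMaybeParseUnit():
  # Expected result: {"min": 1024, "max": "default"}
  return _MaybeParseUnit({"min": "1G", "max": constants.VALUE_DEFAULT})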
3489

    
3490

    
3491
def CreateIPolicyFromOpts(ispecs_mem_size=None,
3492
                          ispecs_cpu_count=None,
3493
                          ispecs_disk_count=None,
3494
                          ispecs_disk_size=None,
3495
                          ispecs_nic_count=None,
3496
                          ipolicy_disk_templates=None,
3497
                          ipolicy_vcpu_ratio=None,
3498
                          ipolicy_spindle_ratio=None,
3499
                          group_ipolicy=False,
3500
                          allowed_values=None,
3501
                          fill_all=False):
3502
  """Creation of instance policy based on command line options.
3503

3504
  @param fill_all: whether for cluster policies we should ensure that
3505
    all values are filled
3506

3507

3508
  """
3509
  try:
3510
    if ispecs_mem_size:
3511
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3512
    if ispecs_disk_size:
3513
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3514
  except (TypeError, ValueError, errors.UnitParseError), err:
3515
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3516
                               " in policy: %s" %
3517
                               (ispecs_disk_size, ispecs_mem_size, err),
3518
                               errors.ECODE_INVAL)
3519

    
3520
  # prepare ipolicy dict
3521
  ipolicy_transposed = {
3522
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3523
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3524
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3525
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3526
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3527
    }
3528

    
3529
  # first, check that the values given are correct
3530
  if group_ipolicy:
3531
    forced_type = TISPECS_GROUP_TYPES
3532
  else:
3533
    forced_type = TISPECS_CLUSTER_TYPES
3534

    
3535
  for specs in ipolicy_transposed.values():
3536
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3537

    
3538
  # then transpose
3539
  ipolicy_out = objects.MakeEmptyIPolicy()
3540
  for name, specs in ipolicy_transposed.iteritems():
3541
    assert name in constants.ISPECS_PARAMETERS
3542
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3543
      ipolicy_out[key][name] = val
3544

    
3545
  # no filldict for non-dicts
3546
  if not group_ipolicy and fill_all:
3547
    if ipolicy_disk_templates is None:
3548
      ipolicy_disk_templates = constants.DISK_TEMPLATES
3549
    if ipolicy_vcpu_ratio is None:
3550
      ipolicy_vcpu_ratio = \
3551
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3552
    if ipolicy_spindle_ratio is None:
3553
      ipolicy_spindle_ratio = \
3554
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3555
  if ipolicy_disk_templates is not None:
3556
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3557
  if ipolicy_vcpu_ratio is not None:
3558
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3559
  if ipolicy_spindle_ratio is not None:
3560
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3561

    
3562
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
3563

    
3564
  return ipolicy_out