lib/cli.py @ d4117a72
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
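
# Illustrative example (not part of the upstream module): a call such as
# _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus") would be expected to
# return {"memory": "512", "auto_balance": False, "vcpus": None}: plain
# key=val pairs keep their string value, the "no_" prefix maps to False with
# the prefix stripped, and the "-" prefix maps to None (reset to default).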


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
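
# Illustrative example (not part of the upstream module): an assumed option
# value "drbd:metavg=xenvg,resync-rate=1024" would be parsed by
# check_ident_key_val() into ("drbd", {"metavg": "xenvg", "resync-rate":
# "1024"}), while a bare "no_drbd" yields ("drbd", False) and "-drbd" yields
# ("drbd", None), which callers interpret as removing or resetting the whole
# parameter group.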


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                 dest="ipolicy_vcpu_ratio",
                                 type="maybefloat", default=None,
                                 help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new new cluster domain"
1268
                                             " secret from file"))
1269

    
1270
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1271
                                           dest="new_cluster_domain_secret",
1272
                                            default=False, action="store_true",
                                            help=("Create a new cluster domain"
                                                  " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


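# Illustrative note (not part of the original module): for commands listed in
# env_override, _ParseArgs above builds the environment variable name from the
# binary and command names.  Assuming a binary "gnt-cluster" whose "verify"
# command allows environment defaults, the variable consulted would be
# "GNT_CLUSTER_VERIFY", e.g.:
#   GNT_CLUSTER_VERIFY="--debug" gnt-cluster verify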
def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)


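# Examples for SplitNodeOption above (illustrative, not part of the original
# module); note that the colon-separated form comes back as a list:
#   SplitNodeOption("node1.example.com:node2.example.com")
#       -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)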
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]


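# Examples for CalculateOSNames above (illustrative, not part of the original
# module):
#   CalculateOSNames("debian", ["squeeze", "wheezy"])
#       -> ["debian+squeeze", "debian+wheezy"]
#   CalculateOSNames("debian", None) -> ["debian"]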
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


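# Examples for ParseFields above (illustrative, not part of the original
# module; the field names are made up):
#   ParseFields(None, ["name", "status"]) -> ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])
#       -> ["name", "status", "oper_ram"]
#   ParseFields("name,pnode", ["name", "status"]) -> ["name", "pnode"]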
UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


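# Illustrative use of AskUser above (not part of the original module); the
# option and message are made up:
#   if not opts.force:
#     if not AskUser("Continue with the operation?"):
#       return 1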
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


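# Minimal submission sketch using SendJob above (illustrative, not part of the
# original module); the opcode is only an example and PollJob is defined
# further below:
#   cl = GetClient()
#   job_id = SendJob([opcodes.OpClusterQuery()], cl=cl)
#   results = PollJob(job_id, cl=cl)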
def GenericPollJob(job_id, cbs, report_cbs):
1811
  """Generic job-polling function.
1812

1813
  @type job_id: number
1814
  @param job_id: Job ID
1815
  @type cbs: Instance of L{JobPollCbBase}
1816
  @param cbs: Data callbacks
1817
  @type report_cbs: Instance of L{JobPollReportCbBase}
1818
  @param report_cbs: Reporting callbacks
1819

1820
  """
1821
  prev_job_info = None
1822
  prev_logmsg_serial = None
1823

    
1824
  status = None
1825

    
1826
  while True:
1827
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1828
                                      prev_logmsg_serial)
1829
    if not result:
1830
      # job not found, go away!
1831
      raise errors.JobLost("Job with id %s lost" % job_id)
1832

    
1833
    if result == constants.JOB_NOTCHANGED:
1834
      report_cbs.ReportNotChanged(job_id, status)
1835

    
1836
      # Wait again
1837
      continue
1838

    
1839
    # Split result, a tuple of (field values, log entries)
1840
    (job_info, log_entries) = result
1841
    (status, ) = job_info
1842

    
1843
    if log_entries:
1844
      for log_entry in log_entries:
1845
        (serial, timestamp, log_type, message) = log_entry
1846
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1847
                                    log_type, message)
1848
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1849

    
1850
    # TODO: Handle canceled and archived jobs
1851
    elif status in (constants.JOB_STATUS_SUCCESS,
1852
                    constants.JOB_STATUS_ERROR,
1853
                    constants.JOB_STATUS_CANCELING,
1854
                    constants.JOB_STATUS_CANCELED):
1855
      break
1856

    
1857
    prev_job_info = job_info
1858

    
1859
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1860
  if not jobs:
1861
    raise errors.JobLost("Job with id %s lost" % job_id)
1862

    
1863
  status, opstatus, result = jobs[0]
1864

    
1865
  if status == constants.JOB_STATUS_SUCCESS:
1866
    return result
1867

    
1868
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1869
    raise errors.OpExecError("Job was canceled")
1870

    
1871
  has_ok = False
1872
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1873
    if status == constants.OP_STATUS_SUCCESS:
1874
      has_ok = True
1875
    elif status == constants.OP_STATUS_ERROR:
1876
      errors.MaybeRaise(msg)
1877

    
1878
      if has_ok:
1879
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1880
                                 (idx, msg))
1881

    
1882
      raise errors.OpExecError(str(msg))
1883

    
1884
  # default failure mode
1885
  raise errors.OpExecError(result)
1886

    
1887

    
1888
class JobPollCbBase:
1889
  """Base class for L{GenericPollJob} callbacks.
1890

1891
  """
1892
  def __init__(self):
1893
    """Initializes this class.
1894

1895
    """
1896

    
1897
  def WaitForJobChangeOnce(self, job_id, fields,
1898
                           prev_job_info, prev_log_serial):
1899
    """Waits for changes on a job.
1900

1901
    """
1902
    raise NotImplementedError()
1903

    
1904
  def QueryJobs(self, job_ids, fields):
1905
    """Returns the selected fields for the selected job IDs.
1906

1907
    @type job_ids: list of numbers
1908
    @param job_ids: Job IDs
1909
    @type fields: list of strings
1910
    @param fields: Fields
1911

1912
    """
1913
    raise NotImplementedError()
1914

    
1915

    
1916
class JobPollReportCbBase:
1917
  """Base class for L{GenericPollJob} reporting callbacks.
1918

1919
  """
1920
  def __init__(self):
1921
    """Initializes this class.
1922

1923
    """
1924

    
1925
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1926
    """Handles a log message.
1927

1928
    """
1929
    raise NotImplementedError()
1930

    
1931
  def ReportNotChanged(self, job_id, status):
1932
    """Called for if a job hasn't changed in a while.
1933

1934
    @type job_id: number
1935
    @param job_id: Job ID
1936
    @type status: string or None
1937
    @param status: Job status if available
1938

1939
    """
1940
    raise NotImplementedError()
1941

    
1942

    
1943
class _LuxiJobPollCb(JobPollCbBase):
1944
  def __init__(self, cl):
1945
    """Initializes this class.
1946

1947
    """
1948
    JobPollCbBase.__init__(self)
1949
    self.cl = cl
1950

    
1951
  def WaitForJobChangeOnce(self, job_id, fields,
1952
                           prev_job_info, prev_log_serial):
1953
    """Waits for changes on a job.
1954

1955
    """
1956
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1957
                                        prev_job_info, prev_log_serial)
1958

    
1959
  def QueryJobs(self, job_ids, fields):
1960
    """Returns the selected fields for the selected job IDs.
1961

1962
    """
1963
    return self.cl.QueryJobs(job_ids, fields)
1964

    
1965

    
1966
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1967
  def __init__(self, feedback_fn):
1968
    """Initializes this class.
1969

1970
    """
1971
    JobPollReportCbBase.__init__(self)
1972

    
1973
    self.feedback_fn = feedback_fn
1974

    
1975
    assert callable(feedback_fn)
1976

    
1977
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1978
    """Handles a log message.
1979

1980
    """
1981
    self.feedback_fn((timestamp, log_type, log_msg))
1982

    
1983
  def ReportNotChanged(self, job_id, status):
1984
    """Called if a job hasn't changed in a while.
1985

1986
    """
1987
    # Ignore
1988

    
1989

    
1990
class StdioJobPollReportCb(JobPollReportCbBase):
1991
  def __init__(self):
1992
    """Initializes this class.
1993

1994
    """
1995
    JobPollReportCbBase.__init__(self)
1996

    
1997
    self.notified_queued = False
1998
    self.notified_waitlock = False
1999

    
2000
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2001
    """Handles a log message.
2002

2003
    """
2004
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2005
             FormatLogMessage(log_type, log_msg))
2006

    
2007
  def ReportNotChanged(self, job_id, status):
2008
    """Called if a job hasn't changed in a while.
2009

2010
    """
2011
    if status is None:
2012
      return
2013

    
2014
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2015
      ToStderr("Job %s is waiting in queue", job_id)
2016
      self.notified_queued = True
2017

    
2018
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2019
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2020
      self.notified_waitlock = True
2021

    
2022

    
2023
def FormatLogMessage(log_type, log_msg):
2024
  """Formats a job message according to its type.
2025

2026
  """
2027
  if log_type != constants.ELOG_MESSAGE:
2028
    log_msg = str(log_msg)
2029

    
2030
  return utils.SafeEncode(log_msg)
2031

    
2032

    
2033
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2034
  """Function to poll for the result of a job.
2035

2036
  @type job_id: job identifier
2037
  @param job_id: the job to poll for results
2038
  @type cl: luxi.Client
2039
  @param cl: the luxi client to use for communicating with the master;
2040
             if None, a new client will be created
2041

2042
  """
2043
  if cl is None:
2044
    cl = GetClient()
2045

    
2046
  if reporter is None:
2047
    if feedback_fn:
2048
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2049
    else:
2050
      reporter = StdioJobPollReportCb()
2051
  elif feedback_fn:
2052
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2053

    
2054
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2055

    
2056

    
2057
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2058
  """Legacy function to submit an opcode.
2059

2060
  This is just a simple wrapper over the construction of the processor
2061
  instance. It should be extended to better handle feedback and
2062
  interaction functions.
2063

2064
  """
2065
  if cl is None:
2066
    cl = GetClient()
2067

    
2068
  SetGenericOpcodeOpts([op], opts)
2069

    
2070
  job_id = SendJob([op], cl=cl)
2071

    
2072
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2073
                       reporter=reporter)
2074

    
2075
  return op_results[0]
2076

    
2077

    
2078
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2079
  """Wrapper around SubmitOpCode or SendJob.
2080

2081
  This function will decide, based on the 'opts' parameter, whether to
2082
  submit and wait for the result of the opcode (and return it), or
2083
  whether to just send the job and print its identifier. It is used in
2084
  order to simplify the implementation of the '--submit' option.
2085

2086
  It will also process the opcodes if we're sending them via SendJob
2087
  (otherwise SubmitOpCode does it).
2088

2089
  """
2090
  if opts and opts.submit_only:
2091
    job = [op]
2092
    SetGenericOpcodeOpts(job, opts)
2093
    job_id = SendJob(job, cl=cl)
2094
    raise JobSubmittedException(job_id)
2095
  else:
2096
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2097

    
2098

    
2099
def SetGenericOpcodeOpts(opcode_list, options):
2100
  """Processor for generic options.
2101

2102
  This function updates the given opcodes based on generic command
2103
  line options (like debug, dry-run, etc.).
2104

2105
  @param opcode_list: list of opcodes
2106
  @param options: command line options or None
2107
  @return: None (in-place modification)
2108

2109
  """
2110
  if not options:
2111
    return
2112
  for op in opcode_list:
2113
    op.debug_level = options.debug
2114
    if hasattr(options, "dry_run"):
2115
      op.dry_run = options.dry_run
2116
    if getattr(options, "priority", None) is not None:
2117
      op.priority = options.priority
2118

    
2119

    
2120
def GetClient(query=False):
2121
  """Connects to the a luxi socket and returns a client.
2122

2123
  @type query: boolean
2124
  @param query: this signifies that the client will only be
2125
      used for queries; if the build-time parameter
2126
      enable-split-queries is enabled, then the client will be
2127
      connected to the query socket instead of the masterd socket
2128

2129
  """
2130
  if query and constants.ENABLE_SPLIT_QUERY:
2131
    address = pathutils.QUERY_SOCKET
2132
  else:
2133
    address = None
2134
  # TODO: Cache object?
2135
  try:
2136
    client = luxi.Client(address=address)
2137
  except luxi.NoMasterError:
2138
    ss = ssconf.SimpleStore()
2139

    
2140
    # Try to read ssconf file
2141
    try:
2142
      ss.GetMasterNode()
2143
    except errors.ConfigurationError:
2144
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2145
                                 " not part of a cluster",
2146
                                 errors.ECODE_INVAL)
2147

    
2148
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2149
    if master != myself:
2150
      raise errors.OpPrereqError("This is not the master node, please connect"
2151
                                 " to node '%s' and rerun the command" %
2152
                                 master, errors.ECODE_INVAL)
2153
    raise
2154
  return client
2155

    
2156

    
2157
def FormatError(err):
2158
  """Return a formatted error message for a given error.
2159

2160
  This function takes an exception instance and returns a tuple
2161
  consisting of two values: first, the recommended exit code, and
2162
  second, a string describing the error message (not
2163
  newline-terminated).
2164

2165
  """
2166
  retcode = 1
2167
  obuf = StringIO()
2168
  msg = str(err)
2169
  if isinstance(err, errors.ConfigurationError):
2170
    txt = "Corrupt configuration file: %s" % msg
2171
    logging.error(txt)
2172
    obuf.write(txt + "\n")
2173
    obuf.write("Aborting.")
2174
    retcode = 2
2175
  elif isinstance(err, errors.HooksAbort):
2176
    obuf.write("Failure: hooks execution failed:\n")
2177
    for node, script, out in err.args[0]:
2178
      if out:
2179
        obuf.write("  node: %s, script: %s, output: %s\n" %
2180
                   (node, script, out))
2181
      else:
2182
        obuf.write("  node: %s, script: %s (no output)\n" %
2183
                   (node, script))
2184
  elif isinstance(err, errors.HooksFailure):
2185
    obuf.write("Failure: hooks general failure: %s" % msg)
2186
  elif isinstance(err, errors.ResolverError):
2187
    this_host = netutils.Hostname.GetSysName()
2188
    if err.args[0] == this_host:
2189
      msg = "Failure: can't resolve my own hostname ('%s')"
2190
    else:
2191
      msg = "Failure: can't resolve hostname '%s'"
2192
    obuf.write(msg % err.args[0])
2193
  elif isinstance(err, errors.OpPrereqError):
2194
    if len(err.args) == 2:
2195
      obuf.write("Failure: prerequisites not met for this"
2196
                 " operation:\nerror type: %s, error details:\n%s" %
2197
                 (err.args[1], err.args[0]))
2198
    else:
2199
      obuf.write("Failure: prerequisites not met for this"
2200
                 " operation:\n%s" % msg)
2201
  elif isinstance(err, errors.OpExecError):
2202
    obuf.write("Failure: command execution error:\n%s" % msg)
2203
  elif isinstance(err, errors.TagError):
2204
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2205
  elif isinstance(err, errors.JobQueueDrainError):
2206
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2207
               " accept new requests\n")
2208
  elif isinstance(err, errors.JobQueueFull):
2209
    obuf.write("Failure: the job queue is full and doesn't accept new"
2210
               " job submissions until old jobs are archived\n")
2211
  elif isinstance(err, errors.TypeEnforcementError):
2212
    obuf.write("Parameter Error: %s" % msg)
2213
  elif isinstance(err, errors.ParameterError):
2214
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2215
  elif isinstance(err, luxi.NoMasterError):
2216
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2217
               " and listening for connections?")
2218
  elif isinstance(err, luxi.TimeoutError):
2219
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2220
               " been submitted and will continue to run even if the call"
2221
               " timed out. Useful commands in this situation are \"gnt-job"
2222
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2223
    obuf.write(msg)
2224
  elif isinstance(err, luxi.PermissionError):
2225
    obuf.write("It seems you don't have permissions to connect to the"
2226
               " master daemon.\nPlease retry as a different user.")
2227
  elif isinstance(err, luxi.ProtocolError):
2228
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2229
               "%s" % msg)
2230
  elif isinstance(err, errors.JobLost):
2231
    obuf.write("Error checking job status: %s" % msg)
2232
  elif isinstance(err, errors.QueryFilterParseError):
2233
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2234
    obuf.write("\n".join(err.GetDetails()))
2235
  elif isinstance(err, errors.GenericError):
2236
    obuf.write("Unhandled Ganeti error: %s" % msg)
2237
  elif isinstance(err, JobSubmittedException):
2238
    obuf.write("JobID: %s\n" % err.args[0])
2239
    retcode = 0
2240
  else:
2241
    obuf.write("Unhandled exception: %s" % msg)
2242
  return retcode, obuf.getvalue().rstrip("\n")
2243

    
2244

    
2245
def GenericMain(commands, override=None, aliases=None,
2246
                env_override=frozenset()):
2247
  """Generic main function for all the gnt-* commands.
2248

2249
  @param commands: a dictionary with a special structure, see the design doc
2250
                   for command line handling.
2251
  @param override: if not None, we expect a dictionary with keys that will
2252
                   override command line options; this can be used to pass
2253
                   options from the scripts to generic functions
2254
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2255
  @param env_override: list of environment names which are allowed to submit
2256
                       default args for commands
2257

2258
  """
2259
  # save the program name and the entire command line for later logging
2260
  if sys.argv:
2261
    binary = os.path.basename(sys.argv[0])
2262
    if not binary:
2263
      binary = sys.argv[0]
2264

    
2265
    if len(sys.argv) >= 2:
2266
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2267
    else:
2268
      logname = binary
2269

    
2270
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2271
  else:
2272
    binary = "<unknown program>"
2273
    cmdline = "<unknown>"
2274

    
2275
  if aliases is None:
2276
    aliases = {}
2277

    
2278
  try:
2279
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2280
                                       env_override)
2281
  except _ShowVersion:
2282
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2283
             constants.RELEASE_VERSION)
2284
    return constants.EXIT_SUCCESS
2285
  except _ShowUsage, err:
2286
    for line in _FormatUsage(binary, commands):
2287
      ToStdout(line)
2288

    
2289
    if err.exit_error:
2290
      return constants.EXIT_FAILURE
2291
    else:
2292
      return constants.EXIT_SUCCESS
2293
  except errors.ParameterError, err:
2294
    result, err_msg = FormatError(err)
2295
    ToStderr(err_msg)
2296
    return 1
2297

    
2298
  if func is None: # parse error
2299
    return 1
2300

    
2301
  if override is not None:
2302
    for key, val in override.iteritems():
2303
      setattr(options, key, val)
2304

    
2305
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2306
                     stderr_logging=True)
2307

    
2308
  logging.info("Command line: %s", cmdline)
2309

    
2310
  try:
2311
    result = func(options, args)
2312
  except (errors.GenericError, luxi.ProtocolError,
2313
          JobSubmittedException), err:
2314
    result, err_msg = FormatError(err)
2315
    logging.exception("Error during command processing")
2316
    ToStderr(err_msg)
2317
  except KeyboardInterrupt:
2318
    result = constants.EXIT_FAILURE
2319
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2320
             " might have been submitted and"
2321
             " will continue to run in the background.")
2322
  except IOError, err:
2323
    if err.errno == errno.EPIPE:
2324
      # our terminal went away, we'll exit
2325
      sys.exit(constants.EXIT_FAILURE)
2326
    else:
2327
      raise
2328

    
2329
  return result
2330

    
2331

    
2332
def ParseNicOption(optvalue):
2333
  """Parses the value of the --net option(s).
2334

2335
  """
2336
  try:
2337
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2338
  except (TypeError, ValueError), err:
2339
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2340
                               errors.ECODE_INVAL)
2341

    
2342
  nics = [{}] * nic_max
2343
  for nidx, ndict in optvalue:
2344
    nidx = int(nidx)
2345

    
2346
    if not isinstance(ndict, dict):
2347
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2348
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2349

    
2350
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2351

    
2352
    nics[nidx] = ndict
2353

    
2354
  return nics
2355

    
2356

    
2357
def GenericInstanceCreate(mode, opts, args):
2358
  """Add an instance to the cluster via either creation or import.
2359

2360
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2361
  @param opts: the command line options selected by the user
2362
  @type args: list
2363
  @param args: should contain only one element, the new instance name
2364
  @rtype: int
2365
  @return: the desired exit code
2366

2367
  """
2368
  instance = args[0]
2369

    
2370
  (pnode, snode) = SplitNodeOption(opts.node)
2371

    
2372
  hypervisor = None
2373
  hvparams = {}
2374
  if opts.hypervisor:
2375
    hypervisor, hvparams = opts.hypervisor
2376

    
2377
  if opts.nics:
2378
    nics = ParseNicOption(opts.nics)
2379
  elif opts.no_nics:
2380
    # no nics
2381
    nics = []
2382
  elif mode == constants.INSTANCE_CREATE:
2383
    # default of one nic, all auto
2384
    nics = [{}]
2385
  else:
2386
    # mode == import
2387
    nics = []
2388

    
2389
  if opts.disk_template == constants.DT_DISKLESS:
2390
    if opts.disks or opts.sd_size is not None:
2391
      raise errors.OpPrereqError("Diskless instance but disk"
2392
                                 " information passed", errors.ECODE_INVAL)
2393
    disks = []
2394
  else:
2395
    if (not opts.disks and not opts.sd_size
2396
        and mode == constants.INSTANCE_CREATE):
2397
      raise errors.OpPrereqError("No disk information specified",
2398
                                 errors.ECODE_INVAL)
2399
    if opts.disks and opts.sd_size is not None:
2400
      raise errors.OpPrereqError("Please use either the '--disk' or"
2401
                                 " '-s' option", errors.ECODE_INVAL)
2402
    if opts.sd_size is not None:
2403
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2404

    
2405
    if opts.disks:
2406
      try:
2407
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2408
      except ValueError, err:
2409
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2410
                                   errors.ECODE_INVAL)
2411
      disks = [{}] * disk_max
2412
    else:
2413
      disks = []
2414
    for didx, ddict in opts.disks:
2415
      didx = int(didx)
2416
      if not isinstance(ddict, dict):
2417
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2418
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2419
      elif constants.IDISK_SIZE in ddict:
2420
        if constants.IDISK_ADOPT in ddict:
2421
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2422
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2423
        try:
2424
          ddict[constants.IDISK_SIZE] = \
2425
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2426
        except ValueError, err:
2427
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2428
                                     (didx, err), errors.ECODE_INVAL)
2429
      elif constants.IDISK_ADOPT in ddict:
2430
        if mode == constants.INSTANCE_IMPORT:
2431
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2432
                                     " import", errors.ECODE_INVAL)
2433
        ddict[constants.IDISK_SIZE] = 0
2434
      else:
2435
        raise errors.OpPrereqError("Missing size or adoption source for"
2436
                                   " disk %d" % didx, errors.ECODE_INVAL)
2437
      disks[didx] = ddict
2438

    
2439
  if opts.tags is not None:
2440
    tags = opts.tags.split(",")
2441
  else:
2442
    tags = []
2443

    
2444
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2445
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2446

    
2447
  if mode == constants.INSTANCE_CREATE:
2448
    start = opts.start
2449
    os_type = opts.os
2450
    force_variant = opts.force_variant
2451
    src_node = None
2452
    src_path = None
2453
    no_install = opts.no_install
2454
    identify_defaults = False
2455
  elif mode == constants.INSTANCE_IMPORT:
2456
    start = False
2457
    os_type = None
2458
    force_variant = False
2459
    src_node = opts.src_node
2460
    src_path = opts.src_dir
2461
    no_install = None
2462
    identify_defaults = opts.identify_defaults
2463
  else:
2464
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2465

    
2466
  op = opcodes.OpInstanceCreate(instance_name=instance,
2467
                                disks=disks,
2468
                                disk_template=opts.disk_template,
2469
                                nics=nics,
2470
                                conflicts_check=opts.conflicts_check,
2471
                                pnode=pnode, snode=snode,
2472
                                ip_check=opts.ip_check,
2473
                                name_check=opts.name_check,
2474
                                wait_for_sync=opts.wait_for_sync,
2475
                                file_storage_dir=opts.file_storage_dir,
2476
                                file_driver=opts.file_driver,
2477
                                iallocator=opts.iallocator,
2478
                                hypervisor=hypervisor,
2479
                                hvparams=hvparams,
2480
                                beparams=opts.beparams,
2481
                                osparams=opts.osparams,
2482
                                mode=mode,
2483
                                start=start,
2484
                                os_type=os_type,
2485
                                force_variant=force_variant,
2486
                                src_node=src_node,
2487
                                src_path=src_path,
2488
                                tags=tags,
2489
                                no_install=no_install,
2490
                                identify_defaults=identify_defaults,
2491
                                ignore_ipolicy=opts.ignore_ipolicy)
2492

    
2493
  SubmitOrSend(op, opts)
2494
  return 0
2495

    
2496

    
2497
class _RunWhileClusterStoppedHelper:
2498
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2499

2500
  """
2501
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2502
    """Initializes this class.
2503

2504
    @type feedback_fn: callable
2505
    @param feedback_fn: Feedback function
2506
    @type cluster_name: string
2507
    @param cluster_name: Cluster name
2508
    @type master_node: string
2509
    @param master_node: Master node name
2510
    @type online_nodes: list
2511
    @param online_nodes: List of names of online nodes
2512

2513
    """
2514
    self.feedback_fn = feedback_fn
2515
    self.cluster_name = cluster_name
2516
    self.master_node = master_node
2517
    self.online_nodes = online_nodes
2518

    
2519
    self.ssh = ssh.SshRunner(self.cluster_name)
2520

    
2521
    self.nonmaster_nodes = [name for name in online_nodes
2522
                            if name != master_node]
2523

    
2524
    assert self.master_node not in self.nonmaster_nodes
2525

    
2526
  def _RunCmd(self, node_name, cmd):
2527
    """Runs a command on the local or a remote machine.
2528

2529
    @type node_name: string
2530
    @param node_name: Machine name
2531
    @type cmd: list
2532
    @param cmd: Command
2533

2534
    """
2535
    if node_name is None or node_name == self.master_node:
2536
      # No need to use SSH
2537
      result = utils.RunCmd(cmd)
2538
    else:
2539
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2540
                            utils.ShellQuoteArgs(cmd))
2541

    
2542
    if result.failed:
2543
      errmsg = ["Failed to run command %s" % result.cmd]
2544
      if node_name:
2545
        errmsg.append("on node %s" % node_name)
2546
      errmsg.append(": exitcode %s and error %s" %
2547
                    (result.exit_code, result.output))
2548
      raise errors.OpExecError(" ".join(errmsg))
2549

    
2550
  def Call(self, fn, *args):
2551
    """Call function while all daemons are stopped.
2552

2553
    @type fn: callable
2554
    @param fn: Function to be called
2555

2556
    """
2557
    # Pause watcher by acquiring an exclusive lock on watcher state file
2558
    self.feedback_fn("Blocking watcher")
2559
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2560
    try:
2561
      # TODO: Currently, this just blocks. There's no timeout.
2562
      # TODO: Should it be a shared lock?
2563
      watcher_block.Exclusive(blocking=True)
2564

    
2565
      # Stop master daemons, so that no new jobs can come in and all running
2566
      # ones are finished
2567
      self.feedback_fn("Stopping master daemons")
2568
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2569
      try:
2570
        # Stop daemons on all nodes
2571
        for node_name in self.online_nodes:
2572
          self.feedback_fn("Stopping daemons on %s" % node_name)
2573
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2574

    
2575
        # All daemons are shut down now
2576
        try:
2577
          return fn(self, *args)
2578
        except Exception, err:
2579
          _, errmsg = FormatError(err)
2580
          logging.exception("Caught exception")
2581
          self.feedback_fn(errmsg)
2582
          raise
2583
      finally:
2584
        # Start cluster again, master node last
2585
        for node_name in self.nonmaster_nodes + [self.master_node]:
2586
          self.feedback_fn("Starting daemons on %s" % node_name)
2587
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2588
    finally:
2589
      # Resume watcher
2590
      watcher_block.Close()
2591

    
2592

    
2593
def RunWhileClusterStopped(feedback_fn, fn, *args):
2594
  """Calls a function while all cluster daemons are stopped.
2595

2596
  @type feedback_fn: callable
2597
  @param feedback_fn: Feedback function
2598
  @type fn: callable
2599
  @param fn: Function to be called when daemons are stopped
2600

2601
  """
2602
  feedback_fn("Gathering cluster information")
2603

    
2604
  # This ensures we're running on the master daemon
2605
  cl = GetClient()
2606

    
2607
  (cluster_name, master_node) = \
2608
    cl.QueryConfigValues(["cluster_name", "master_node"])
2609

    
2610
  online_nodes = GetOnlineNodes([], cl=cl)
2611

    
2612
  # Don't keep a reference to the client. The master daemon will go away.
2613
  del cl
2614

    
2615
  assert master_node in online_nodes
2616

    
2617
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2618
                                       online_nodes).Call(fn, *args)
2619

    
2620

    
2621
def GenerateTable(headers, fields, separator, data,
2622
                  numfields=None, unitfields=None,
2623
                  units=None):
2624
  """Prints a table with headers and different fields.
2625

2626
  @type headers: dict
2627
  @param headers: dictionary mapping field names to headers for
2628
      the table
2629
  @type fields: list
2630
  @param fields: the field names corresponding to each row in
2631
      the data field
2632
  @param separator: the separator to be used; if this is None,
2633
      the default 'smart' algorithm is used which computes optimal
2634
      field width, otherwise just the separator is used between
2635
      each field
2636
  @type data: list
2637
  @param data: a list of lists, each sublist being one row to be output
2638
  @type numfields: list
2639
  @param numfields: a list with the fields that hold numeric
2640
      values and thus should be right-aligned
2641
  @type unitfields: list
2642
  @param unitfields: a list with the fields that hold numeric
2643
      values that should be formatted with the units field
2644
  @type units: string or None
2645
  @param units: the units we should use for formatting, or None for
2646
      automatic choice (human-readable for non-separator usage, otherwise
2647
      megabytes); this is a one-letter string
2648

2649
  """
2650
  if units is None:
2651
    if separator:
2652
      units = "m"
2653
    else:
2654
      units = "h"
2655

    
2656
  if numfields is None:
2657
    numfields = []
2658
  if unitfields is None:
2659
    unitfields = []
2660

    
2661
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2662
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2663

    
2664
  format_fields = []
2665
  for field in fields:
2666
    if headers and field not in headers:
2667
      # TODO: handle better unknown fields (either revert to old
2668
      # style of raising exception, or deal more intelligently with
2669
      # variable fields)
2670
      headers[field] = field
2671
    if separator is not None:
2672
      format_fields.append("%s")
2673
    elif numfields.Matches(field):
2674
      format_fields.append("%*s")
2675
    else:
2676
      format_fields.append("%-*s")
2677

    
2678
  if separator is None:
2679
    mlens = [0 for name in fields]
2680
    format_str = " ".join(format_fields)
2681
  else:
2682
    format_str = separator.replace("%", "%%").join(format_fields)
2683

    
2684
  for row in data:
2685
    if row is None:
2686
      continue
2687
    for idx, val in enumerate(row):
2688
      if unitfields.Matches(fields[idx]):
2689
        try:
2690
          val = int(val)
2691
        except (TypeError, ValueError):
2692
          pass
2693
        else:
2694
          val = row[idx] = utils.FormatUnit(val, units)
2695
      val = row[idx] = str(val)
2696
      if separator is None:
2697
        mlens[idx] = max(mlens[idx], len(val))
2698

    
2699
  result = []
2700
  if headers:
2701
    args = []
2702
    for idx, name in enumerate(fields):
2703
      hdr = headers[name]
2704
      if separator is None:
2705
        mlens[idx] = max(mlens[idx], len(hdr))
2706
        args.append(mlens[idx])
2707
      args.append(hdr)
2708
    result.append(format_str % tuple(args))
2709

    
2710
  if separator is None:
2711
    assert len(mlens) == len(fields)
2712

    
2713
    if fields and not numfields.Matches(fields[-1]):
2714
      mlens[-1] = 0
2715

    
2716
  for line in data:
2717
    args = []
2718
    if line is None:
2719
      line = ["-" for _ in fields]
2720
    for idx in range(len(fields)):
2721
      if separator is None:
2722
        args.append(mlens[idx])
2723
      args.append(line[idx])
2724
    result.append(format_str % tuple(args))
2725

    
2726
  return result
2727

    
2728

    
2729
def _FormatBool(value):
2730
  """Formats a boolean value as a string.
2731

2732
  """
2733
  if value:
2734
    return "Y"
2735
  return "N"
2736

    
2737

    
2738
#: Default formatting for query results; (callback, align right)
2739
_DEFAULT_FORMAT_QUERY = {
2740
  constants.QFT_TEXT: (str, False),
2741
  constants.QFT_BOOL: (_FormatBool, False),
2742
  constants.QFT_NUMBER: (str, True),
2743
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2744
  constants.QFT_OTHER: (str, False),
2745
  constants.QFT_UNKNOWN: (str, False),
2746
  }
2747

    
2748

    
2749
def _GetColumnFormatter(fdef, override, unit):
2750
  """Returns formatting function for a field.
2751

2752
  @type fdef: L{objects.QueryFieldDefinition}
2753
  @type override: dict
2754
  @param override: Dictionary for overriding field formatting functions,
2755
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2756
  @type unit: string
2757
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2758
  @rtype: tuple; (callable, bool)
2759
  @return: Returns the function to format a value (takes one parameter) and a
2760
    boolean for aligning the value on the right-hand side
2761

2762
  """
2763
  fmt = override.get(fdef.name, None)
2764
  if fmt is not None:
2765
    return fmt
2766

    
2767
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2768

    
2769
  if fdef.kind == constants.QFT_UNIT:
2770
    # Can't keep this information in the static dictionary
2771
    return (lambda value: utils.FormatUnit(value, unit), True)
2772

    
2773
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2774
  if fmt is not None:
2775
    return fmt
2776

    
2777
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2778

    
2779

    
2780
class _QueryColumnFormatter:
2781
  """Callable class for formatting fields of a query.
2782

2783
  """
2784
  def __init__(self, fn, status_fn, verbose):
2785
    """Initializes this class.
2786

2787
    @type fn: callable
2788
    @param fn: Formatting function
2789
    @type status_fn: callable
2790
    @param status_fn: Function to report fields' status
2791
    @type verbose: boolean
2792
    @param verbose: whether to use verbose field descriptions or not
2793

2794
    """
2795
    self._fn = fn
2796
    self._status_fn = status_fn
2797
    self._verbose = verbose
2798

    
2799
  def __call__(self, data):
2800
    """Returns a field's string representation.
2801

2802
    """
2803
    (status, value) = data
2804

    
2805
    # Report status
2806
    self._status_fn(status)
2807

    
2808
    if status == constants.RS_NORMAL:
2809
      return self._fn(value)
2810

    
2811
    assert value is None, \
2812
           "Found value %r for abnormal status %s" % (value, status)
2813

    
2814
    return FormatResultError(status, self._verbose)
2815

    
2816

    
2817
def FormatResultError(status, verbose):
2818
  """Formats result status other than L{constants.RS_NORMAL}.
2819

2820
  @param status: The result status
2821
  @type verbose: boolean
2822
  @param verbose: Whether to return the verbose text
2823
  @return: Text of result status
2824

2825
  """
2826
  assert status != constants.RS_NORMAL, \
2827
         "FormatResultError called with status equal to constants.RS_NORMAL"
2828
  try:
2829
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2830
  except KeyError:
2831
    raise NotImplementedError("Unknown status %s" % status)
2832
  else:
2833
    if verbose:
2834
      return verbose_text
2835
    return normal_text
2836

    
2837

    
2838
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


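# Example (editor's illustrative sketch, not part of the original module):
# rendering a query response as a human-readable table; "response" stands
# for an objects.QueryResponse, e.g. obtained via GetClient().Query(...).
#
#   (status, lines) = FormatQueryResult(response, unit="h", header=True)
#   for line in lines:
#     ToStdout(line)
#   if status != QR_NORMAL:
#     ToStderr("Some fields could not be retrieved")

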
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


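# Example (editor's illustrative sketch, not part of the original module):
# how a typical list command would call GenericList; the option names on
# "opts" follow the conventions of this module but are assumptions here.
#
#   def ListNodes(opts, args):
#     fields = ["name", "dtotal", "dfree"]
#     return GenericList(constants.QR_NODE, fields, args, opts.units,
#                        opts.separator, not opts.no_headers,
#                        verbose=opts.verbose, namefield="name")

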
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


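# Example (editor's illustrative sketch, not part of the original module):
# combining TableColumn and FormatTable directly. With a separator the
# formatted values are simply joined; without one each column is padded to
# its widest value (right-aligned where align_right is True).
#
#   columns = [TableColumn("Node", str, False),
#              TableColumn("Mem", str, True)]
#   rows = [["node1.example.com", "512M"], ["node2", "1.0G"]]
#   FormatTable(rows, columns, True, ":")
#   # -> ["Node:Mem", "node1.example.com:512M", "node2:1.0G"]

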
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


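# Example (editor's illustrative sketch, not part of the original module):
# the input is a (seconds, microseconds) pair such as the job timestamps
# returned by the master daemon; anything else yields "?".
#
#   FormatTimestamp((1234567890, 123456))  # e.g. "2009-02-13 23:31:30.123456"
#                                          # (depends on the local timezone)
#   FormatTimestamp(None)                  # -> "?"

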
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


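# Example (editor's illustrative sketch, not part of the original module):
# all results are plain seconds.
#
#   ParseTimespec("30")  # -> 30
#   ParseTimespec("2h")  # -> 7200
#   ParseTimespec("1w")  # -> 604800
#   ParseTimespec("h")   # raises errors.OpPrereqError (suffix only)

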
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that have been skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


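# Example (editor's illustrative sketch, not part of the original module):
# collecting the names of all online nodes except the master, e.g. before
# distributing a file to every other node.
#
#   for node in GetOnlineNodes([], filter_master=True):
#     ToStdout("would contact %s", node)

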
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


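# Example (editor's illustrative sketch, not part of the original module):
# both helpers accept printf-style arguments that are applied to the
# message; the variables used below are placeholders.
#
#   ToStdout("Submitted job %s", job_id)
#   ToStderr("Failed to submit job for %s: %s", name, err)

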
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


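# Example (editor's illustrative sketch, not part of the original module):
# queueing several jobs and waiting for all of them; the opcode shown is
# only illustrative, any opcodes.OpCode instances can be queued.
#
#   jex = JobExecutor(opts=opts)
#   for name in ["inst1.example.com", "inst2.example.com"]:
#     jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   for (success, result) in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)

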
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters explicitly set for the object
    (its "own" parameters, without inherited defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)


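# Example (editor's illustrative sketch, not part of the original module):
# keys missing from param_dict are shown as defaults taken from "actual".
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"vcpus": 2}, {"memory": 128, "vcpus": 2})
#   buf.getvalue()
#   # -> "  - memory: default (128)\n  - vcpus: 2\n"

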
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


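# Example (editor's illustrative sketch, not part of the original module):
# asking for confirmation before acting on a set of instances; the return
# value is the boolean attached to the user's choice (for more than 20
# names a "view list" option is offered first).
#
#   names = ["inst1.example.com", "inst2.example.com"]
#   if not ConfirmOperation(names, "instances", "shutdown"):
#     return constants.EXIT_FAILURE

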
def _MaybeParseUnit(elements):
  """Parses and returns a dictionary of values with potential unit suffixes.

  Values equal to L{constants.VALUE_DEFAULT} are kept as-is, everything else
  is passed through L{utils.ParseUnit}.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ipolicy_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES

  for specs in ipolicy_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ipolicy_out = objects.MakeEmptyIPolicy()
  for name, specs in ipolicy_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      ipolicy_out[key][name] = val

  # no filldict for non-dicts
  if not group_ipolicy and fill_all:
    if ipolicy_disk_templates is None:
      ipolicy_disk_templates = constants.DISK_TEMPLATES
    if ipolicy_vcpu_ratio is None:
      ipolicy_vcpu_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
    if ipolicy_spindle_ratio is None:
      ipolicy_spindle_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  return ipolicy_out
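# Example (editor's illustrative sketch, not part of the original module):
# building a cluster-wide instance policy from already-parsed option values;
# with fill_all=True the disk template list and the ratio values that were
# not given explicitly are taken from constants.DISK_TEMPLATES and
# constants.IPOLICY_DEFAULTS. The concrete limits are illustrative only.
#
#   ipolicy = CreateIPolicyFromOpts(
#     ispecs_mem_size={"min": "128M", "max": "4G", "std": "1G"},
#     ispecs_cpu_count={"min": 1, "max": 8, "std": 1},
#     fill_all=True)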