#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NETWORK_TYPE_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
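
# Illustrative example (not part of the original module): an argument
# specification is simply a list of _Argument instances, so a command that
# takes exactly one instance name followed by any number of node names
# could be described as:
#   [ArgInstance(min=1, max=1), ArgNode()]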


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
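
# Illustrative example (not part of the original module): with
# opts.tag_type == constants.TAG_NODE and args == ["node1.example.com",
# "mytag"], _ExtractTagsObject returns (constants.TAG_NODE,
# "node1.example.com") and pops the node name, leaving args == ["mytag"].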


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  tag object kinds (cluster, node group, node, network, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  tag object kinds (cluster, node group, node, network, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  tag object kinds (cluster, node group, node, network, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """Custom optparse converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Elements
  without an explicit value are converted specially: keys with the prefix
  'no_' will have the prefix stripped and the value False, keys with the
  prefix '-' will have the prefix stripped and the value None, and all
  other keys will have the value True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
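
# Illustrative example (not part of the original module):
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,always_failover")
# returns {"memory": "512", "auto_balance": False, "always_failover": True},
# i.e. plain key=val pairs keep their string value, "no_"-prefixed keys
# become False and bare keys become True.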


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval

    
620

    
621
def check_key_val(option, opt, value):  # pylint: disable=W0613
622
  """Custom parser class for key=val,key=val options.
623

624
  This will store the parsed values as a dict {key: val}.
625

626
  """
627
  return _SplitKeyVal(opt, value)
628

    
629

    
630
def check_bool(option, opt, value): # pylint: disable=W0613
631
  """Custom parser for yes/no options.
632

633
  This will store the parsed value as either True or False.
634

635
  """
636
  value = value.lower()
637
  if value == constants.VALUE_FALSE or value == "no":
638
    return False
639
  elif value == constants.VALUE_TRUE or value == "yes":
640
    return True
641
  else:
642
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
643

    
644

    
645
def check_list(option, opt, value): # pylint: disable=W0613
646
  """Custom parser for comma-separated lists.
647

648
  """
649
  # we have to make this explicit check since "".split(",") is [""],
650
  # not an empty list :(
651
  if not value:
652
    return []
653
  else:
654
    return utils.UnescapeAndSplit(value)
655

    
656

    
657
def check_maybefloat(option, opt, value): # pylint: disable=W0613
658
  """Custom parser for float numbers which might be also defaults.
659

660
  """
661
  value = value.lower()
662

    
663
  if value == constants.VALUE_DEFAULT:
664
    return value
665
  else:
666
    return float(value)
667

    
668

    
669
# completion_suggestion is normally a list. Using numeric values not evaluating
670
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 108)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
970
                                default=None,
971
                                choices=list(constants.HT_MIGRATION_MODES),
972
                                help="Override default migration mode (choose"
973
                                " either live or non-live")
974

    
975
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
976
                                help="Target node and optional secondary node",
977
                                metavar="<pnode>[:<snode>]",
978
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
979

    
980
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
981
                           action="append", metavar="<node>",
982
                           help="Use only this node (can be used multiple"
983
                           " times, if not given defaults to all nodes)",
984
                           completion_suggest=OPT_COMPL_ONE_NODE)
985

    
986
NODEGROUP_OPT_NAME = "--node-group"
987
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
988
                           dest="nodegroup",
989
                           help="Node group (name or uuid)",
990
                           metavar="<nodegroup>",
991
                           default=None, type="string",
992
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
993

    
994
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
995
                             metavar="<node>",
996
                             completion_suggest=OPT_COMPL_ONE_NODE)
997

    
998
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
999
                         action="store_false",
1000
                         help="Don't start the instance after creation")
1001

    
1002
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1003
                         action="store_true", default=False,
1004
                         help="Show command instead of executing it")
1005

    
1006
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))
1300
                                       dest="cluster_domain_secret",
1301
                                       default=None,
1302
                                       help=("Load new new cluster domain"
1303
                                             " secret from file"))
1304

    
1305
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1306
                                           dest="new_cluster_domain_secret",
1307
                                           default=False, action="store_true",
1308
                                           help=("Create a new cluster domain"
1309
                                                 " secret"))
1310

    
1311
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1312
                              dest="use_replication_network",
1313
                              help="Whether to use the replication network"
1314
                              " for talking to the nodes",
1315
                              action="store_true", default=False)
1316

    
1317
MAINTAIN_NODE_HEALTH_OPT = \
1318
    cli_option("--maintain-node-health", dest="maintain_node_health",
1319
               metavar=_YORNO, default=None, type="bool",
1320
               help="Configure the cluster to automatically maintain node"
1321
               " health, by shutting down unknown instances, shutting down"
1322
               " unknown DRBD devices, etc.")
1323

    
1324
IDENTIFY_DEFAULTS_OPT = \
1325
    cli_option("--identify-defaults", dest="identify_defaults",
1326
               default=False, action="store_true",
1327
               help="Identify which saved instance parameters are equal to"
1328
               " the current cluster defaults and set them as such, instead"
1329
               " of marking them as overridden")
1330

    
1331
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1332
                         action="store", dest="uid_pool",
1333
                         help=("A list of user-ids or user-id"
1334
                               " ranges separated by commas"))
1335

    
1336
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1337
                          action="store", dest="add_uids",
1338
                          help=("A list of user-ids or user-id"
1339
                                " ranges separated by commas, to be"
1340
                                " added to the user-id pool"))
1341

    
1342
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1343
                             action="store", dest="remove_uids",
1344
                             help=("A list of user-ids or user-id"
1345
                                   " ranges separated by commas, to be"
1346
                                   " removed from the user-id pool"))
1347

    
1348
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))
1353

    
1354
ROMAN_OPT = cli_option("--roman",
1355
                       dest="roman_integers", default=False,
1356
                       action="store_true",
1357
                       help="Use roman numbers for positive integers")
1358

    
1359
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1360
                             action="store", default=None,
1361
                             help="Specifies usermode helper for DRBD")
1362

    
1363
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1364
                                action="store_false", default=True,
1365
                                help="Disable support for DRBD")
1366

    
1367
PRIMARY_IP_VERSION_OPT = \
1368
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1369
               action="store", dest="primary_ip_version",
1370
               metavar="%d|%d" % (constants.IP4_VERSION,
1371
                                  constants.IP6_VERSION),
1372
               help="Cluster-wide IP version for primary IP")
1373

    
1374
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1375
                              action="store_true",
1376
                              help="Show machine name for every line in output")
1377

    
1378
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1379
                              action="store_true",
1380
                              help=("Hide successful results and show failures"
1381
                                    " only (determined by the exit code)"))
1382

    
1383

    
1384
def _PriorityOptionCb(option, _, value, parser):
1385
  """Callback for processing C{--priority} option.
1386

1387
  """
1388
  value = _PRIONAME_TO_VALUE[value]
1389

    
1390
  setattr(parser.values, option.dest, value)
1391

    
1392

    
1393
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1394
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1395
                          choices=_PRIONAME_TO_VALUE.keys(),
1396
                          action="callback", type="choice",
1397
                          callback=_PriorityOptionCb,
1398
                          help="Priority for opcode processing")
1399

    
1400
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1401
                        type="bool", default=None, metavar=_YORNO,
1402
                        help="Sets the hidden flag on the OS")
1403

    
1404
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1405
                        type="bool", default=None, metavar=_YORNO,
1406
                        help="Sets the blacklisted flag on the OS")
1407

    
1408
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1409
                                     type="bool", metavar=_YORNO,
1410
                                     dest="prealloc_wipe_disks",
1411
                                     help=("Wipe disks prior to instance"
1412
                                           " creation"))
1413

    
1414
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1415
                             type="keyval", default=None,
1416
                             help="Node parameters")
1417

    
1418
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1419
                              action="store", metavar="POLICY", default=None,
1420
                              help="Allocation policy for the node group")
1421

    
1422
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1423
                              type="bool", metavar=_YORNO,
1424
                              dest="node_powered",
1425
                              help="Specify if the SoR for node is powered")
1426

    
1427
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1428
                             default=constants.OOB_TIMEOUT,
1429
                             help="Maximum time to wait for out-of-band helper")
1430

    
1431
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1432
                             default=constants.OOB_POWER_DELAY,
1433
                             help="Time in seconds to wait between power-ons")
1434

    
1435
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1436
                              action="store_true", default=False,
1437
                              help=("Whether command argument should be treated"
1438
                                    " as filter"))
1439

    
1440
NO_REMEMBER_OPT = cli_option("--no-remember",
1441
                             dest="no_remember",
1442
                             action="store_true", default=False,
1443
                             help="Perform but do not record the change"
1444
                             " in the configuration")
1445

    
1446
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1447
                              default=False, action="store_true",
1448
                              help="Evacuate primary instances only")
1449

    
1450
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1451
                                default=False, action="store_true",
1452
                                help="Evacuate secondary instances only"
1453
                                     " (applies only to internally mirrored"
1454
                                     " disk templates, e.g. %s)" %
1455
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1456

    
1457
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1458
                                action="store_true", default=False,
1459
                                help="Pause instance at startup")
1460

    
1461
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1462
                          help="Destination node group (name or uuid)",
1463
                          default=None, action="append",
1464
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1465

    
1466
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1467
                               action="append", dest="ignore_errors",
1468
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1469
                               help="Error code to be ignored")
1470

    
1471
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1472
                            action="append",
1473
                            help=("Specify disk state information in the"
1474
                                  " format"
1475
                                  " storage_type/identifier:option=value,...;"
1476
                                  " note this is unused for now"),
1477
                            type="identkeyval")
1478

    
1479
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1480
                          action="append",
1481
                          help=("Specify hypervisor state information in the"
1482
                                " format hypervisor:option=value,...;"
1483
                                " note this is unused for now"),
1484
                          type="identkeyval")
1485

    
1486
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1487
                                action="store_true", default=False,
1488
                                help="Ignore instance policy violations")
1489

    
1490
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1491
                             help="Sets the instance's runtime memory,"
1492
                             " ballooning it up or down to the new value",
1493
                             default=None, type="unit", metavar="<size>")
1494

    
1495
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1496
                          action="store_true", default=False,
1497
                          help="Marks the grow as absolute instead of the"
1498
                          " (default) relative mode")
1499

    
1500
NETWORK_OPT = cli_option("--network",
1501
                         action="store", default=None, dest="network",
1502
                         help="IP network in CIDR notation")
1503

    
1504
GATEWAY_OPT = cli_option("--gateway",
1505
                         action="store", default=None, dest="gateway",
1506
                         help="IP address of the router (gateway)")
1507

    
1508
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1509
                                  action="store", default=None,
1510
                                  dest="add_reserved_ips",
1511
                                  help="Comma-separated list of"
1512
                                  " reserved IPs to add")
1513

    
1514
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1515
                                     action="store", default=None,
1516
                                     dest="remove_reserved_ips",
1517
                                     help="Comma-delimited list of"
1518
                                     " reserved IPs to remove")
1519

    
1520
NETWORK_TYPE_OPT = cli_option("--network-type",
1521
                              action="store", default=None, dest="network_type",
1522
                              help="Network type: private, public, None")
1523

    
1524
NETWORK6_OPT = cli_option("--network6",
1525
                          action="store", default=None, dest="network6",
1526
                          help="IP network in CIDR notation")
1527

    
1528
GATEWAY6_OPT = cli_option("--gateway6",
1529
                          action="store", default=None, dest="gateway6",
1530
                          help="IP6 address of the router (gateway)")
1531

    
1532
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1533
                                  dest="conflicts_check",
1534
                                  default=True,
1535
                                  action="store_false",
1536
                                  help="Don't check for conflicting IPs")
1537

    
1538
#: Options provided by all commands
1539
COMMON_OPTS = [DEBUG_OPT]
1540

    
1541
# common options for creating instances. add and import then add their own
1542
# specific ones.
1543
COMMON_CREATE_OPTS = [
1544
  BACKEND_OPT,
1545
  DISK_OPT,
1546
  DISK_TEMPLATE_OPT,
1547
  FILESTORE_DIR_OPT,
1548
  FILESTORE_DRIVER_OPT,
1549
  HYPERVISOR_OPT,
1550
  IALLOCATOR_OPT,
1551
  NET_OPT,
1552
  NODE_PLACEMENT_OPT,
1553
  NOIPCHECK_OPT,
1554
  NOCONFLICTSCHECK_OPT,
1555
  NONAMECHECK_OPT,
1556
  NONICS_OPT,
1557
  NWSYNC_OPT,
1558
  OSPARAMS_OPT,
1559
  OS_SIZE_OPT,
1560
  SUBMIT_OPT,
1561
  TAG_ADD_OPT,
1562
  DRY_RUN_OPT,
1563
  PRIORITY_OPT,
1564
  ]
1565

    
1566
# common instance policy options
1567
INSTANCE_POLICY_OPTS = [
1568
  SPECS_CPU_COUNT_OPT,
1569
  SPECS_DISK_COUNT_OPT,
1570
  SPECS_DISK_SIZE_OPT,
1571
  SPECS_MEM_SIZE_OPT,
1572
  SPECS_NIC_COUNT_OPT,
1573
  IPOLICY_DISK_TEMPLATES,
1574
  IPOLICY_VCPU_RATIO,
1575
  IPOLICY_SPINDLE_RATIO,
1576
  ]
1577

    
1578

    
1579
class _ShowUsage(Exception):
1580
  """Exception class for L{_ParseArgs}.
1581

1582
  """
1583
  def __init__(self, exit_error):
1584
    """Initializes instances of this class.
1585

1586
    @type exit_error: bool
1587
    @param exit_error: Whether to report failure on exit
1588

1589
    """
1590
    Exception.__init__(self)
1591
    self.exit_error = exit_error
1592

    
1593

    
1594
class _ShowVersion(Exception):
1595
  """Exception class for L{_ParseArgs}.
1596

1597
  """
1598

    
1599

    
1600
def _ParseArgs(binary, argv, commands, aliases, env_override):
1601
  """Parser for the command line arguments.
1602

1603
  This function parses the arguments and returns the function which
1604
  must be executed together with its (modified) arguments.
1605

1606
  @param binary: Script name
1607
  @param argv: Command line arguments
1608
  @param commands: Dictionary containing command definitions
1609
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1610
  @param env_override: list of env variables allowed for default args
1611
  @raise _ShowUsage: If usage description should be shown
1612
  @raise _ShowVersion: If version should be shown
1613

1614
  """
1615
  assert not (env_override - set(commands))
1616
  assert not (set(aliases.keys()) & set(commands.keys()))
1617

    
1618
  if len(argv) > 1:
1619
    cmd = argv[1]
1620
  else:
1621
    # No option or command given
1622
    raise _ShowUsage(exit_error=True)
1623

    
1624
  if cmd == "--version":
1625
    raise _ShowVersion()
1626
  elif cmd == "--help":
1627
    raise _ShowUsage(exit_error=False)
1628
  elif not (cmd in commands or cmd in aliases):
1629
    raise _ShowUsage(exit_error=True)
1630

    
1631
  # get command, unalias it, and look it up in commands
1632
  if cmd in aliases:
1633
    if aliases[cmd] not in commands:
1634
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1635
                                   " command '%s'" % (cmd, aliases[cmd]))
1636

    
1637
    cmd = aliases[cmd]
1638

    
1639
  if cmd in env_override:
1640
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1641
    env_args = os.environ.get(args_env_name)
1642
    if env_args:
1643
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1644

    
1645
  func, args_def, parser_opts, usage, description = commands[cmd]
1646
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1647
                        description=description,
1648
                        formatter=TitledHelpFormatter(),
1649
                        usage="%%prog %s %s" % (cmd, usage))
1650
  parser.disable_interspersed_args()
1651
  options, args = parser.parse_args(args=argv[2:])
1652

    
1653
  if not _CheckArguments(cmd, args_def, args):
1654
    return None, None, None
1655

    
1656
  return func, options, args
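
# Illustrative sketch (not taken from a real gnt-* script) of what a
# "commands" dictionary entry looks like; the handler and command names are
# made up, and ArgUnknown/DRY_RUN_OPT are assumed to be the argument/option
# helpers defined elsewhere in this module:
#
#   commands = {
#     "example-cmd": (RunExampleCmd, [ArgUnknown(min=1, max=1)],
#                     [DRY_RUN_OPT], "<name>", "Does something to <name>"),
#     }
#
# i.e. each value is the (function, argument definition, options, usage,
# description) tuple unpacked in _ParseArgs above.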
1657

    
1658

    
1659
def _FormatUsage(binary, commands):
1660
  """Generates a nice description of all commands.
1661

1662
  @param binary: Script name
1663
  @param commands: Dictionary containing command definitions
1664

1665
  """
1666
  # compute the display width of the command-name column, capped at 60 chars
1667
  mlen = min(60, max(map(len, commands)))
1668

    
1669
  yield "Usage: %s {command} [options...] [argument...]" % binary
1670
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1671
  yield ""
1672
  yield "Commands:"
1673

    
1674
  # and format a nice command list
1675
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1676
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1677
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1678
    for line in help_lines:
1679
      yield " %-*s   %s" % (mlen, "", line)
1680

    
1681
  yield ""
1682

    
1683

    
1684
def _CheckArguments(cmd, args_def, args):
1685
  """Verifies the arguments using the argument definition.
1686

1687
  Algorithm:
1688

1689
    1. Abort with error if values specified by user but none expected.
1690

1691
    1. For each argument in definition
1692

1693
      1. Keep running count of minimum number of values (min_count)
1694
      1. Keep running count of maximum number of values (max_count)
1695
      1. If it has an unlimited number of values
1696

1697
        1. Abort with error if it's not the last argument in the definition
1698

1699
    1. If last argument has limited number of values
1700

1701
      1. Abort with error if number of values doesn't match or is too large
1702

1703
    1. Abort with error if user didn't pass enough values (min_count)
1704

1705
  """
1706
  if args and not args_def:
1707
    ToStderr("Error: Command %s expects no arguments", cmd)
1708
    return False
1709

    
1710
  min_count = None
1711
  max_count = None
1712
  check_max = None
1713

    
1714
  last_idx = len(args_def) - 1
1715

    
1716
  for idx, arg in enumerate(args_def):
1717
    if min_count is None:
1718
      min_count = arg.min
1719
    elif arg.min is not None:
1720
      min_count += arg.min
1721

    
1722
    if max_count is None:
1723
      max_count = arg.max
1724
    elif arg.max is not None:
1725
      max_count += arg.max
1726

    
1727
    if idx == last_idx:
1728
      check_max = (arg.max is not None)
1729

    
1730
    elif arg.max is None:
1731
      raise errors.ProgrammerError("Only the last argument can have max=None")
1732

    
1733
  if check_max:
1734
    # Command with exact number of arguments
1735
    if (min_count is not None and max_count is not None and
1736
        min_count == max_count and len(args) != min_count):
1737
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1738
      return False
1739

    
1740
    # Command with limited number of arguments
1741
    if max_count is not None and len(args) > max_count:
1742
      ToStderr("Error: Command %s expects only %d argument(s)",
1743
               cmd, max_count)
1744
      return False
1745

    
1746
  # Command with some required arguments
1747
  if min_count is not None and len(args) < min_count:
1748
    ToStderr("Error: Command %s expects at least %d argument(s)",
1749
             cmd, min_count)
1750
    return False
1751

    
1752
  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
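
# A quick usage sketch (hypothetical node names); note that the colon form
# comes back as a list from str.split() while the single-node form is a
# tuple, and callers simply unpack either one:
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     -> ("node1.example.com", None)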


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
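
# Example (hypothetical OS and variant names):
#   CalculateOSNames("debootstrap", ["wheezy", "squeeze"])
#     -> ["debootstrap+wheezy", "debootstrap+squeeze"]
#   CalculateOSNames("debootstrap", None)
#     -> ["debootstrap"]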


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


UsesRPC = rpc.RunWithRPC
1801

    
1802

    
1803
def AskUser(text, choices=None):
1804
  """Ask the user a question.
1805

1806
  @param text: the question to ask
1807

1808
  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1811
      note that the '?' char is reserved for help
1812

1813
  @return: one of the return values from the choices list; if input is
1814
      not possible (i.e. not running with a tty), we return the last
      entry from the list
1816

1817
  """
1818
  if choices is None:
1819
    choices = [("y", True, "Perform the operation"),
1820
               ("n", False, "Do not perform the operation")]
1821
  if not choices or not isinstance(choices, list):
1822
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1823
  for entry in choices:
1824
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1825
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1826

    
1827
  answer = choices[-1][1]
1828
  new_text = []
1829
  for line in text.splitlines():
1830
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1831
  text = "\n".join(new_text)
1832
  try:
1833
    f = file("/dev/tty", "a+")
1834
  except IOError:
1835
    return answer
1836
  try:
1837
    chars = [entry[0] for entry in choices]
1838
    chars[-1] = "[%s]" % chars[-1]
1839
    chars.append("?")
1840
    maps = dict([(entry[0], entry[1]) for entry in choices])
1841
    while True:
1842
      f.write(text)
1843
      f.write("\n")
1844
      f.write("/".join(chars))
1845
      f.write(": ")
1846
      line = f.readline(2).strip().lower()
1847
      if line in maps:
1848
        answer = maps[line]
1849
        break
1850
      elif line == "?":
1851
        for entry in choices:
1852
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1853
        f.write("\n")
1854
        continue
1855
  finally:
1856
    f.close()
1857
  return answer
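
# Typical use (illustrative; "instance_name" is a made-up variable). With the
# default choices the prompt on the controlling tty looks like "y/[n]/?", and
# the function returns False, the last (default) entry, when no tty is
# available or "n" is entered:
#   if not AskUser("Destroy instance %s?" % instance_name):
#     return 1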
1858

    
1859

    
1860
class JobSubmittedException(Exception):
1861
  """Job was submitted, client should exit.
1862

1863
  This exception has one argument, the ID of the job that was
1864
  submitted. The handler should print this ID.
1865

1866
  This is not an error, just a structured way to exit from clients.
1867

1868
  """
1869

    
1870

    
1871
def SendJob(ops, cl=None):
1872
  """Function to submit an opcode without waiting for the results.
1873

1874
  @type ops: list
1875
  @param ops: list of opcodes
1876
  @type cl: luxi.Client
1877
  @param cl: the luxi client to use for communicating with the master;
1878
             if None, a new client will be created
1879

1880
  """
1881
  if cl is None:
1882
    cl = GetClient()
1883

    
1884
  job_id = cl.SubmitJob(ops)
1885

    
1886
  return job_id
1887

    
1888

    
1889
def GenericPollJob(job_id, cbs, report_cbs):
1890
  """Generic job-polling function.
1891

1892
  @type job_id: number
1893
  @param job_id: Job ID
1894
  @type cbs: Instance of L{JobPollCbBase}
1895
  @param cbs: Data callbacks
1896
  @type report_cbs: Instance of L{JobPollReportCbBase}
1897
  @param report_cbs: Reporting callbacks
1898

1899
  """
1900
  prev_job_info = None
1901
  prev_logmsg_serial = None
1902

    
1903
  status = None
1904

    
1905
  while True:
1906
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1907
                                      prev_logmsg_serial)
1908
    if not result:
1909
      # job not found, go away!
1910
      raise errors.JobLost("Job with id %s lost" % job_id)
1911

    
1912
    if result == constants.JOB_NOTCHANGED:
1913
      report_cbs.ReportNotChanged(job_id, status)
1914

    
1915
      # Wait again
1916
      continue
1917

    
1918
    # Split result, a tuple of (field values, log entries)
1919
    (job_info, log_entries) = result
1920
    (status, ) = job_info
1921

    
1922
    if log_entries:
1923
      for log_entry in log_entries:
1924
        (serial, timestamp, log_type, message) = log_entry
1925
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1926
                                    log_type, message)
1927
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1928

    
1929
    # TODO: Handle canceled and archived jobs
1930
    elif status in (constants.JOB_STATUS_SUCCESS,
1931
                    constants.JOB_STATUS_ERROR,
1932
                    constants.JOB_STATUS_CANCELING,
1933
                    constants.JOB_STATUS_CANCELED):
1934
      break
1935

    
1936
    prev_job_info = job_info
1937

    
1938
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1939
  if not jobs:
1940
    raise errors.JobLost("Job with id %s lost" % job_id)
1941

    
1942
  status, opstatus, result = jobs[0]
1943

    
1944
  if status == constants.JOB_STATUS_SUCCESS:
1945
    return result
1946

    
1947
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1948
    raise errors.OpExecError("Job was canceled")
1949

    
1950
  has_ok = False
1951
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1952
    if status == constants.OP_STATUS_SUCCESS:
1953
      has_ok = True
1954
    elif status == constants.OP_STATUS_ERROR:
1955
      errors.MaybeRaise(msg)
1956

    
1957
      if has_ok:
1958
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1959
                                 (idx, msg))
1960

    
1961
      raise errors.OpExecError(str(msg))
1962

    
1963
  # default failure mode
1964
  raise errors.OpExecError(result)
1965

    
1966

    
1967
class JobPollCbBase:
1968
  """Base class for L{GenericPollJob} callbacks.
1969

1970
  """
1971
  def __init__(self):
1972
    """Initializes this class.
1973

1974
    """
1975

    
1976
  def WaitForJobChangeOnce(self, job_id, fields,
1977
                           prev_job_info, prev_log_serial):
1978
    """Waits for changes on a job.
1979

1980
    """
1981
    raise NotImplementedError()
1982

    
1983
  def QueryJobs(self, job_ids, fields):
1984
    """Returns the selected fields for the selected job IDs.
1985

1986
    @type job_ids: list of numbers
1987
    @param job_ids: Job IDs
1988
    @type fields: list of strings
1989
    @param fields: Fields
1990

1991
    """
1992
    raise NotImplementedError()
1993

    
1994

    
1995
class JobPollReportCbBase:
1996
  """Base class for L{GenericPollJob} reporting callbacks.
1997

1998
  """
1999
  def __init__(self):
2000
    """Initializes this class.
2001

2002
    """
2003

    
2004
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2005
    """Handles a log message.
2006

2007
    """
2008
    raise NotImplementedError()
2009

    
2010
  def ReportNotChanged(self, job_id, status):
2011
    """Called for if a job hasn't changed in a while.
2012

2013
    @type job_id: number
2014
    @param job_id: Job ID
2015
    @type status: string or None
2016
    @param status: Job status if available
2017

2018
    """
2019
    raise NotImplementedError()
2020

    
2021

    
2022
class _LuxiJobPollCb(JobPollCbBase):
2023
  def __init__(self, cl):
2024
    """Initializes this class.
2025

2026
    """
2027
    JobPollCbBase.__init__(self)
2028
    self.cl = cl
2029

    
2030
  def WaitForJobChangeOnce(self, job_id, fields,
2031
                           prev_job_info, prev_log_serial):
2032
    """Waits for changes on a job.
2033

2034
    """
2035
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2036
                                        prev_job_info, prev_log_serial)
2037

    
2038
  def QueryJobs(self, job_ids, fields):
2039
    """Returns the selected fields for the selected job IDs.
2040

2041
    """
2042
    return self.cl.QueryJobs(job_ids, fields)
2043

    
2044

    
2045
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2046
  def __init__(self, feedback_fn):
2047
    """Initializes this class.
2048

2049
    """
2050
    JobPollReportCbBase.__init__(self)
2051

    
2052
    self.feedback_fn = feedback_fn
2053

    
2054
    assert callable(feedback_fn)
2055

    
2056
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2057
    """Handles a log message.
2058

2059
    """
2060
    self.feedback_fn((timestamp, log_type, log_msg))
2061

    
2062
  def ReportNotChanged(self, job_id, status):
2063
    """Called if a job hasn't changed in a while.
2064

2065
    """
2066
    # Ignore
2067

    
2068

    
2069
class StdioJobPollReportCb(JobPollReportCbBase):
2070
  def __init__(self):
2071
    """Initializes this class.
2072

2073
    """
2074
    JobPollReportCbBase.__init__(self)
2075

    
2076
    self.notified_queued = False
2077
    self.notified_waitlock = False
2078

    
2079
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2080
    """Handles a log message.
2081

2082
    """
2083
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2084
             FormatLogMessage(log_type, log_msg))
2085

    
2086
  def ReportNotChanged(self, job_id, status):
2087
    """Called if a job hasn't changed in a while.
2088

2089
    """
2090
    if status is None:
2091
      return
2092

    
2093
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2094
      ToStderr("Job %s is waiting in queue", job_id)
2095
      self.notified_queued = True
2096

    
2097
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2098
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2099
      self.notified_waitlock = True
2100

    
2101

    
2102
def FormatLogMessage(log_type, log_msg):
2103
  """Formats a job message according to its type.
2104

2105
  """
2106
  if log_type != constants.ELOG_MESSAGE:
2107
    log_msg = str(log_msg)
2108

    
2109
  return utils.SafeEncode(log_msg)
2110

    
2111

    
2112
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2113
  """Function to poll for the result of a job.
2114

2115
  @type job_id: job identifier
2116
  @param job_id: the job to poll for results
2117
  @type cl: luxi.Client
2118
  @param cl: the luxi client to use for communicating with the master;
2119
             if None, a new client will be created
2120

2121
  """
2122
  if cl is None:
2123
    cl = GetClient()
2124

    
2125
  if reporter is None:
2126
    if feedback_fn:
2127
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2128
    else:
2129
      reporter = StdioJobPollReportCb()
2130
  elif feedback_fn:
2131
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2132

    
2133
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2134

    
2135

    
2136
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2137
  """Legacy function to submit an opcode.
2138

2139
  This is just a simple wrapper over the construction of the processor
2140
  instance. It should be extended to better handle feedback and
2141
  interaction functions.
2142

2143
  """
2144
  if cl is None:
2145
    cl = GetClient()
2146

    
2147
  SetGenericOpcodeOpts([op], opts)
2148

    
2149
  job_id = SendJob([op], cl=cl)
2150

    
2151
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2152
                       reporter=reporter)
2153

    
2154
  return op_results[0]
2155

    
2156

    
2157
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2158
  """Wrapper around SubmitOpCode or SendJob.
2159

2160
  This function will decide, based on the 'opts' parameter, whether to
2161
  submit and wait for the result of the opcode (and return it), or
2162
  whether to just send the job and print its identifier. It is used in
2163
  order to simplify the implementation of the '--submit' option.
2164

2165
  It will also process the opcodes if we're sending them via SendJob
2166
  (otherwise SubmitOpCode does it).
2167

2168
  """
2169
  if opts and opts.submit_only:
2170
    job = [op]
2171
    SetGenericOpcodeOpts(job, opts)
2172
    job_id = SendJob(job, cl=cl)
2173
    raise JobSubmittedException(job_id)
2174
  else:
2175
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
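
# Typical caller pattern (illustrative; assumes "opts" comes from the
# command's option parser and that the --submit option sets opts.submit_only):
#   op = opcodes.OpInstanceCreate(...)  # any opcode built by the command
#   result = SubmitOrSend(op, opts)
# With --submit this raises JobSubmittedException, which GenericMain turns
# into printing the job ID; otherwise it waits for the job and returns the
# result of the (single) opcode.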
2176

    
2177

    
2178
def SetGenericOpcodeOpts(opcode_list, options):
2179
  """Processor for generic options.
2180

2181
  This function updates the given opcodes based on generic command
2182
  line options (like debug, dry-run, etc.).
2183

2184
  @param opcode_list: list of opcodes
2185
  @param options: command line options or None
2186
  @return: None (in-place modification)
2187

2188
  """
2189
  if not options:
2190
    return
2191
  for op in opcode_list:
2192
    op.debug_level = options.debug
2193
    if hasattr(options, "dry_run"):
2194
      op.dry_run = options.dry_run
2195
    if getattr(options, "priority", None) is not None:
2196
      op.priority = options.priority
2197

    
2198

    
2199
def GetClient(query=False):
2200
  """Connects to the a luxi socket and returns a client.
2201

2202
  @type query: boolean
2203
  @param query: this signifies that the client will only be
2204
      used for queries; if the build-time parameter
2205
      enable-split-queries is enabled, then the client will be
2206
      connected to the query socket instead of the masterd socket
2207

2208
  """
2209
  if query and constants.ENABLE_SPLIT_QUERY:
2210
    address = pathutils.QUERY_SOCKET
2211
  else:
2212
    address = None
2213
  # TODO: Cache object?
2214
  try:
2215
    client = luxi.Client(address=address)
2216
  except luxi.NoMasterError:
2217
    ss = ssconf.SimpleStore()
2218

    
2219
    # Try to read ssconf file
2220
    try:
2221
      ss.GetMasterNode()
2222
    except errors.ConfigurationError:
2223
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2224
                                 " not part of a cluster",
2225
                                 errors.ECODE_INVAL)
2226

    
2227
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2228
    if master != myself:
2229
      raise errors.OpPrereqError("This is not the master node, please connect"
2230
                                 " to node '%s' and rerun the command" %
2231
                                 master, errors.ECODE_INVAL)
2232
    raise
2233
  return client
2234

    
2235

    
2236
def FormatError(err):
2237
  """Return a formatted error message for a given error.
2238

2239
  This function takes an exception instance and returns a tuple
2240
  consisting of two values: first, the recommended exit code, and
2241
  second, a string describing the error message (not
2242
  newline-terminated).
2243

2244
  """
2245
  retcode = 1
2246
  obuf = StringIO()
2247
  msg = str(err)
2248
  if isinstance(err, errors.ConfigurationError):
2249
    txt = "Corrupt configuration file: %s" % msg
2250
    logging.error(txt)
2251
    obuf.write(txt + "\n")
2252
    obuf.write("Aborting.")
2253
    retcode = 2
2254
  elif isinstance(err, errors.HooksAbort):
2255
    obuf.write("Failure: hooks execution failed:\n")
2256
    for node, script, out in err.args[0]:
2257
      if out:
2258
        obuf.write("  node: %s, script: %s, output: %s\n" %
2259
                   (node, script, out))
2260
      else:
2261
        obuf.write("  node: %s, script: %s (no output)\n" %
2262
                   (node, script))
2263
  elif isinstance(err, errors.HooksFailure):
2264
    obuf.write("Failure: hooks general failure: %s" % msg)
2265
  elif isinstance(err, errors.ResolverError):
2266
    this_host = netutils.Hostname.GetSysName()
2267
    if err.args[0] == this_host:
2268
      msg = "Failure: can't resolve my own hostname ('%s')"
2269
    else:
2270
      msg = "Failure: can't resolve hostname '%s'"
2271
    obuf.write(msg % err.args[0])
2272
  elif isinstance(err, errors.OpPrereqError):
2273
    if len(err.args) == 2:
2274
      obuf.write("Failure: prerequisites not met for this"
2275
                 " operation:\nerror type: %s, error details:\n%s" %
2276
                 (err.args[1], err.args[0]))
2277
    else:
2278
      obuf.write("Failure: prerequisites not met for this"
2279
                 " operation:\n%s" % msg)
2280
  elif isinstance(err, errors.OpExecError):
2281
    obuf.write("Failure: command execution error:\n%s" % msg)
2282
  elif isinstance(err, errors.TagError):
2283
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2284
  elif isinstance(err, errors.JobQueueDrainError):
2285
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2286
               " accept new requests\n")
2287
  elif isinstance(err, errors.JobQueueFull):
2288
    obuf.write("Failure: the job queue is full and doesn't accept new"
2289
               " job submissions until old jobs are archived\n")
2290
  elif isinstance(err, errors.TypeEnforcementError):
2291
    obuf.write("Parameter Error: %s" % msg)
2292
  elif isinstance(err, errors.ParameterError):
2293
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2294
  elif isinstance(err, luxi.NoMasterError):
2295
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2296
               " and listening for connections?")
2297
  elif isinstance(err, luxi.TimeoutError):
2298
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2299
               " been submitted and will continue to run even if the call"
2300
               " timed out. Useful commands in this situation are \"gnt-job"
2301
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2302
    obuf.write(msg)
2303
  elif isinstance(err, luxi.PermissionError):
2304
    obuf.write("It seems you don't have permissions to connect to the"
2305
               " master daemon.\nPlease retry as a different user.")
2306
  elif isinstance(err, luxi.ProtocolError):
2307
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2308
               "%s" % msg)
2309
  elif isinstance(err, errors.JobLost):
2310
    obuf.write("Error checking job status: %s" % msg)
2311
  elif isinstance(err, errors.QueryFilterParseError):
2312
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2313
    obuf.write("\n".join(err.GetDetails()))
2314
  elif isinstance(err, errors.GenericError):
2315
    obuf.write("Unhandled Ganeti error: %s" % msg)
2316
  elif isinstance(err, JobSubmittedException):
2317
    obuf.write("JobID: %s\n" % err.args[0])
2318
    retcode = 0
2319
  else:
2320
    obuf.write("Unhandled exception: %s" % msg)
2321
  return retcode, obuf.getvalue().rstrip("\n")
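
# Example (illustrative message text):
#   FormatError(errors.OpExecError("disk sync timed out"))
#     -> (1, "Failure: command execution error:\ndisk sync timed out")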
2322

    
2323

    
2324
def GenericMain(commands, override=None, aliases=None,
2325
                env_override=frozenset()):
2326
  """Generic main function for all the gnt-* commands.
2327

2328
  @param commands: a dictionary with a special structure, see the design doc
2329
                   for command line handling.
2330
  @param override: if not None, we expect a dictionary with keys that will
2331
                   override command line options; this can be used to pass
2332
                   options from the scripts to generic functions
2333
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2334
  @param env_override: list of environment names which are allowed to submit
2335
                       default args for commands
2336

2337
  """
2338
  # save the program name and the entire command line for later logging
2339
  if sys.argv:
2340
    binary = os.path.basename(sys.argv[0])
2341
    if not binary:
2342
      binary = sys.argv[0]
2343

    
2344
    if len(sys.argv) >= 2:
2345
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2346
    else:
2347
      logname = binary
2348

    
2349
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2350
  else:
2351
    binary = "<unknown program>"
2352
    cmdline = "<unknown>"
2353

    
2354
  if aliases is None:
2355
    aliases = {}
2356

    
2357
  try:
2358
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2359
                                       env_override)
2360
  except _ShowVersion:
2361
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2362
             constants.RELEASE_VERSION)
2363
    return constants.EXIT_SUCCESS
2364
  except _ShowUsage, err:
2365
    for line in _FormatUsage(binary, commands):
2366
      ToStdout(line)
2367

    
2368
    if err.exit_error:
2369
      return constants.EXIT_FAILURE
2370
    else:
2371
      return constants.EXIT_SUCCESS
2372
  except errors.ParameterError, err:
2373
    result, err_msg = FormatError(err)
2374
    ToStderr(err_msg)
2375
    return 1
2376

    
2377
  if func is None: # parse error
2378
    return 1
2379

    
2380
  if override is not None:
2381
    for key, val in override.iteritems():
2382
      setattr(options, key, val)
2383

    
2384
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2385
                     stderr_logging=True)
2386

    
2387
  logging.info("Command line: %s", cmdline)
2388

    
2389
  try:
2390
    result = func(options, args)
2391
  except (errors.GenericError, luxi.ProtocolError,
2392
          JobSubmittedException), err:
2393
    result, err_msg = FormatError(err)
2394
    logging.exception("Error during command processing")
2395
    ToStderr(err_msg)
2396
  except KeyboardInterrupt:
2397
    result = constants.EXIT_FAILURE
2398
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2399
             " might have been submitted and"
2400
             " will continue to run in the background.")
2401
  except IOError, err:
2402
    if err.errno == errno.EPIPE:
2403
      # our terminal went away, we'll exit
2404
      sys.exit(constants.EXIT_FAILURE)
2405
    else:
2406
      raise
2407

    
2408
  return result
2409

    
2410

    
2411
def ParseNicOption(optvalue):
2412
  """Parses the value of the --net option(s).
2413

2414
  """
2415
  try:
2416
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2417
  except (TypeError, ValueError), err:
2418
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2419
                               errors.ECODE_INVAL)
2420

    
2421
  nics = [{}] * nic_max
2422
  for nidx, ndict in optvalue:
2423
    nidx = int(nidx)
2424

    
2425
    if not isinstance(ndict, dict):
2426
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2427
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2428

    
2429
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2430

    
2431
    nics[nidx] = ndict
2432

    
2433
  return nics
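
# Example (illustrative values; assumes "link" and "ip" are valid keys in
# constants.INIC_PARAMS_TYPES):
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"ip": "198.51.100.10"})])
#     -> [{"link": "br0"}, {"ip": "198.51.100.10"}]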
2434

    
2435

    
2436
def GenericInstanceCreate(mode, opts, args):
2437
  """Add an instance to the cluster via either creation or import.
2438

2439
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2440
  @param opts: the command line options selected by the user
2441
  @type args: list
2442
  @param args: should contain only one element, the new instance name
2443
  @rtype: int
2444
  @return: the desired exit code
2445

2446
  """
2447
  instance = args[0]
2448

    
2449
  (pnode, snode) = SplitNodeOption(opts.node)
2450

    
2451
  hypervisor = None
2452
  hvparams = {}
2453
  if opts.hypervisor:
2454
    hypervisor, hvparams = opts.hypervisor
2455

    
2456
  if opts.nics:
2457
    nics = ParseNicOption(opts.nics)
2458
  elif opts.no_nics:
2459
    # no nics
2460
    nics = []
2461
  elif mode == constants.INSTANCE_CREATE:
2462
    # default of one nic, all auto
2463
    nics = [{}]
2464
  else:
2465
    # mode == import
2466
    nics = []
2467

    
2468
  if opts.disk_template == constants.DT_DISKLESS:
2469
    if opts.disks or opts.sd_size is not None:
2470
      raise errors.OpPrereqError("Diskless instance but disk"
2471
                                 " information passed", errors.ECODE_INVAL)
2472
    disks = []
2473
  else:
2474
    if (not opts.disks and not opts.sd_size
2475
        and mode == constants.INSTANCE_CREATE):
2476
      raise errors.OpPrereqError("No disk information specified",
2477
                                 errors.ECODE_INVAL)
2478
    if opts.disks and opts.sd_size is not None:
2479
      raise errors.OpPrereqError("Please use either the '--disk' or"
2480
                                 " '-s' option", errors.ECODE_INVAL)
2481
    if opts.sd_size is not None:
2482
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2483

    
2484
    if opts.disks:
2485
      try:
2486
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2487
      except ValueError, err:
2488
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2489
                                   errors.ECODE_INVAL)
2490
      disks = [{}] * disk_max
2491
    else:
2492
      disks = []
2493
    for didx, ddict in opts.disks:
2494
      didx = int(didx)
2495
      if not isinstance(ddict, dict):
2496
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2497
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2498
      elif constants.IDISK_SIZE in ddict:
2499
        if constants.IDISK_ADOPT in ddict:
2500
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2501
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2502
        try:
2503
          ddict[constants.IDISK_SIZE] = \
2504
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2505
        except ValueError, err:
2506
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2507
                                     (didx, err), errors.ECODE_INVAL)
2508
      elif constants.IDISK_ADOPT in ddict:
2509
        if mode == constants.INSTANCE_IMPORT:
2510
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2511
                                     " import", errors.ECODE_INVAL)
2512
        ddict[constants.IDISK_SIZE] = 0
2513
      else:
2514
        raise errors.OpPrereqError("Missing size or adoption source for"
2515
                                   " disk %d" % didx, errors.ECODE_INVAL)
2516
      disks[didx] = ddict
2517

    
2518
  if opts.tags is not None:
2519
    tags = opts.tags.split(",")
2520
  else:
2521
    tags = []
2522

    
2523
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2524
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2525

    
2526
  if mode == constants.INSTANCE_CREATE:
2527
    start = opts.start
2528
    os_type = opts.os
2529
    force_variant = opts.force_variant
2530
    src_node = None
2531
    src_path = None
2532
    no_install = opts.no_install
2533
    identify_defaults = False
2534
  elif mode == constants.INSTANCE_IMPORT:
2535
    start = False
2536
    os_type = None
2537
    force_variant = False
2538
    src_node = opts.src_node
2539
    src_path = opts.src_dir
2540
    no_install = None
2541
    identify_defaults = opts.identify_defaults
2542
  else:
2543
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2544

    
2545
  op = opcodes.OpInstanceCreate(instance_name=instance,
2546
                                disks=disks,
2547
                                disk_template=opts.disk_template,
2548
                                nics=nics,
2549
                                conflicts_check=opts.conflicts_check,
2550
                                pnode=pnode, snode=snode,
2551
                                ip_check=opts.ip_check,
2552
                                name_check=opts.name_check,
2553
                                wait_for_sync=opts.wait_for_sync,
2554
                                file_storage_dir=opts.file_storage_dir,
2555
                                file_driver=opts.file_driver,
2556
                                iallocator=opts.iallocator,
2557
                                hypervisor=hypervisor,
2558
                                hvparams=hvparams,
2559
                                beparams=opts.beparams,
2560
                                osparams=opts.osparams,
2561
                                mode=mode,
2562
                                start=start,
2563
                                os_type=os_type,
2564
                                force_variant=force_variant,
2565
                                src_node=src_node,
2566
                                src_path=src_path,
2567
                                tags=tags,
2568
                                no_install=no_install,
2569
                                identify_defaults=identify_defaults,
2570
                                ignore_ipolicy=opts.ignore_ipolicy)
2571

    
2572
  SubmitOrSend(op, opts)
2573
  return 0
2574

    
2575

    
2576
class _RunWhileClusterStoppedHelper:
2577
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2578

2579
  """
2580
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2581
    """Initializes this class.
2582

2583
    @type feedback_fn: callable
2584
    @param feedback_fn: Feedback function
2585
    @type cluster_name: string
2586
    @param cluster_name: Cluster name
2587
    @type master_node: string
2588
    @param master_node: Master node name
2589
    @type online_nodes: list
2590
    @param online_nodes: List of names of online nodes
2591

2592
    """
2593
    self.feedback_fn = feedback_fn
2594
    self.cluster_name = cluster_name
2595
    self.master_node = master_node
2596
    self.online_nodes = online_nodes
2597

    
2598
    self.ssh = ssh.SshRunner(self.cluster_name)
2599

    
2600
    self.nonmaster_nodes = [name for name in online_nodes
2601
                            if name != master_node]
2602

    
2603
    assert self.master_node not in self.nonmaster_nodes
2604

    
2605
  def _RunCmd(self, node_name, cmd):
2606
    """Runs a command on the local or a remote machine.
2607

2608
    @type node_name: string
2609
    @param node_name: Machine name
2610
    @type cmd: list
2611
    @param cmd: Command
2612

2613
    """
2614
    if node_name is None or node_name == self.master_node:
2615
      # No need to use SSH
2616
      result = utils.RunCmd(cmd)
2617
    else:
2618
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2619
                            utils.ShellQuoteArgs(cmd))
2620

    
2621
    if result.failed:
2622
      errmsg = ["Failed to run command %s" % result.cmd]
2623
      if node_name:
2624
        errmsg.append("on node %s" % node_name)
2625
      errmsg.append(": exitcode %s and error %s" %
2626
                    (result.exit_code, result.output))
2627
      raise errors.OpExecError(" ".join(errmsg))
2628

    
2629
  def Call(self, fn, *args):
2630
    """Call function while all daemons are stopped.
2631

2632
    @type fn: callable
2633
    @param fn: Function to be called
2634

2635
    """
2636
    # Pause watcher by acquiring an exclusive lock on watcher state file
2637
    self.feedback_fn("Blocking watcher")
2638
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2639
    try:
2640
      # TODO: Currently, this just blocks. There's no timeout.
2641
      # TODO: Should it be a shared lock?
2642
      watcher_block.Exclusive(blocking=True)
2643

    
2644
      # Stop master daemons, so that no new jobs can come in and all running
2645
      # ones are finished
2646
      self.feedback_fn("Stopping master daemons")
2647
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2648
      try:
2649
        # Stop daemons on all nodes
2650
        for node_name in self.online_nodes:
2651
          self.feedback_fn("Stopping daemons on %s" % node_name)
2652
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2653

    
2654
        # All daemons are shut down now
2655
        try:
2656
          return fn(self, *args)
2657
        except Exception, err:
2658
          _, errmsg = FormatError(err)
2659
          logging.exception("Caught exception")
2660
          self.feedback_fn(errmsg)
2661
          raise
2662
      finally:
2663
        # Start cluster again, master node last
2664
        for node_name in self.nonmaster_nodes + [self.master_node]:
2665
          self.feedback_fn("Starting daemons on %s" % node_name)
2666
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2667
    finally:
2668
      # Resume watcher
2669
      watcher_block.Close()
2670

    
2671

    
2672
def RunWhileClusterStopped(feedback_fn, fn, *args):
2673
  """Calls a function while all cluster daemons are stopped.
2674

2675
  @type feedback_fn: callable
2676
  @param feedback_fn: Feedback function
2677
  @type fn: callable
2678
  @param fn: Function to be called when daemons are stopped
2679

2680
  """
2681
  feedback_fn("Gathering cluster information")
2682

    
2683
  # This ensures we're running on the master daemon
2684
  cl = GetClient()
2685

    
2686
  (cluster_name, master_node) = \
2687
    cl.QueryConfigValues(["cluster_name", "master_node"])
2688

    
2689
  online_nodes = GetOnlineNodes([], cl=cl)
2690

    
2691
  # Don't keep a reference to the client. The master daemon will go away.
2692
  del cl
2693

    
2694
  assert master_node in online_nodes
2695

    
2696
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2697
                                       online_nodes).Call(fn, *args)
2698

    
2699

    
2700
def GenerateTable(headers, fields, separator, data,
2701
                  numfields=None, unitfields=None,
2702
                  units=None):
2703
  """Prints a table with headers and different fields.
2704

2705
  @type headers: dict
2706
  @param headers: dictionary mapping field names to headers for
2707
      the table
2708
  @type fields: list
2709
  @param fields: the field names corresponding to each row in
2710
      the data field
2711
  @param separator: the separator to be used; if this is None,
2712
      the default 'smart' algorithm is used which computes optimal
2713
      field width, otherwise just the separator is used between
2714
      each field
2715
  @type data: list
2716
  @param data: a list of lists, each sublist being one row to be output
2717
  @type numfields: list
2718
  @param numfields: a list with the fields that hold numeric
2719
      values and thus should be right-aligned
2720
  @type unitfields: list
2721
  @param unitfields: a list with the fields that hold numeric
2722
      values that should be formatted with the units field
2723
  @type units: string or None
2724
  @param units: the units we should use for formatting, or None for
2725
      automatic choice (human-readable for non-separator usage, otherwise
2726
      megabytes); this is a one-letter string
2727

2728
  """
2729
  if units is None:
2730
    if separator:
2731
      units = "m"
2732
    else:
2733
      units = "h"
2734

    
2735
  if numfields is None:
2736
    numfields = []
2737
  if unitfields is None:
2738
    unitfields = []
2739

    
2740
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2741
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2742

    
2743
  format_fields = []
2744
  for field in fields:
2745
    if headers and field not in headers:
2746
      # TODO: handle better unknown fields (either revert to old
2747
      # style of raising exception, or deal more intelligently with
2748
      # variable fields)
2749
      headers[field] = field
2750
    if separator is not None:
2751
      format_fields.append("%s")
2752
    elif numfields.Matches(field):
2753
      format_fields.append("%*s")
2754
    else:
2755
      format_fields.append("%-*s")
2756

    
2757
  if separator is None:
2758
    mlens = [0 for name in fields]
2759
    format_str = " ".join(format_fields)
2760
  else:
2761
    format_str = separator.replace("%", "%%").join(format_fields)
2762

    
2763
  for row in data:
2764
    if row is None:
2765
      continue
2766
    for idx, val in enumerate(row):
2767
      if unitfields.Matches(fields[idx]):
2768
        try:
2769
          val = int(val)
2770
        except (TypeError, ValueError):
2771
          pass
2772
        else:
2773
          val = row[idx] = utils.FormatUnit(val, units)
2774
      val = row[idx] = str(val)
2775
      if separator is None:
2776
        mlens[idx] = max(mlens[idx], len(val))
2777

    
2778
  result = []
2779
  if headers:
2780
    args = []
2781
    for idx, name in enumerate(fields):
2782
      hdr = headers[name]
2783
      if separator is None:
2784
        mlens[idx] = max(mlens[idx], len(hdr))
2785
        args.append(mlens[idx])
2786
      args.append(hdr)
2787
    result.append(format_str % tuple(args))
2788

    
2789
  if separator is None:
2790
    assert len(mlens) == len(fields)
2791

    
2792
    if fields and not numfields.Matches(fields[-1]):
2793
      mlens[-1] = 0
2794

    
2795
  for line in data:
2796
    args = []
2797
    if line is None:
2798
      line = ["-" for _ in fields]
2799
    for idx in range(len(fields)):
2800
      if separator is None:
2801
        args.append(mlens[idx])
2802
      args.append(line[idx])
2803
    result.append(format_str % tuple(args))
2804

    
2805
  return result
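
# Example (illustrative data; with separator=None the "smart" layout is used):
#   GenerateTable({"name": "Name", "size": "Size"}, ["name", "size"], None,
#                 [["disk0", 512], ["disk1", 2048]], numfields=["size"])
# returns a list of pre-aligned lines, roughly:
#   ["Name  Size", "disk0  512", "disk1 2048"]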
2806

    
2807

    
2808
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


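# Quick illustration of the format strings produced by _GetColFormatString
# (widths are made-up example values):
#   _GetColFormatString(10, True)  -> "%10s"   (right-aligned, width 10)
#   _GetColFormatString(10, False) -> "%-10s"  (left-aligned, width 10)
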
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


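# Illustrative use of TableColumn/FormatTable (a sketch only; the column
# definitions and row values below are invented for the example):
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True),
#     ]
#   rows = [["disk0", 1024], ["disk1", 20480]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)
#
# With separator=None the column widths are computed from the data; passing
# e.g. separator="|" skips the width calculation and simply joins the
# formatted fields.
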
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


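# Worked examples for ParseTimespec, derived from the suffix map above
# (inputs are invented):
#   ParseTimespec("30")  -> 30      (no suffix: plain seconds)
#   ParseTimespec("2h")  -> 7200    (2 * 3600)
#   ParseTimespec("1w")  -> 604800  (1 * 604800)
# A bare suffix such as "h" or an unparseable value raises
# errors.OpPrereqError.
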
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  any offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


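# For illustration, with nodes=["node1", "node2"], nodegroup=None and
# filter_master=True, GetOnlineNodes above ends up sending a query filter
# shaped roughly like (node names invented):
#
#   [qlang.OP_AND,
#    [qlang.OP_OR, [qlang.OP_EQUAL, "name", "node1"],
#                  [qlang.OP_EQUAL, "name", "node2"]],
#    [qlang.OP_NOT, [qlang.OP_TRUE, "master"]]]
#
# The exact shape of the name sub-filter depends on qlang.MakeSimpleFilter,
# so treat this as a sketch rather than the literal wire format.
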
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


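# Sketch of the intended JobExecutor workflow (the loop variables and the
# opcode placeholder below are invented; "op" stands for any opcodes.OpCode
# instance built by the caller):
#
#   je = JobExecutor(opts=opts)
#   for (name, op) in work_items:
#     je.QueueJob(name, op)
#   je.SubmitPending()
#   for (success, result) in je.GetResults():
#     ...
#
# As noted in the class docstring, a JobExecutor instance should not be
# reused once GetResults() has been called.
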
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the explicitly set ("own") parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)


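# Example of the output shape of FormatParameterDict (parameter names and
# values are invented), written into a StringIO buffer as callers in this
# module do:
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"kernel_path": "/vmlinuz"},
#                       {"kernel_path": "/vmlinuz", "root_path": "/dev/vda1"})
#   # buf.getvalue() would then read roughly:
#   #   - kernel_path: /vmlinuz
#   #   - root_path: default (/dev/vda1)
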
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


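# Example for _MaybeParseUnit (input values invented): entries equal to
# constants.VALUE_DEFAULT are kept as-is, everything else goes through
# utils.ParseUnit:
#   _MaybeParseUnit({"min": "512M", "max": "default"})
#     -> {"min": 512, "max": "default"}   (assuming ParseUnit returns MiB)
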
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ipolicy_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES

  for specs in ipolicy_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ipolicy_out = objects.MakeEmptyIPolicy()
  for name, specs in ipolicy_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      ipolicy_out[key][name] = val

  # no filldict for non-dicts
  if not group_ipolicy and fill_all:
    if ipolicy_disk_templates is None:
      ipolicy_disk_templates = constants.DISK_TEMPLATES
    if ipolicy_vcpu_ratio is None:
      ipolicy_vcpu_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
    if ipolicy_spindle_ratio is None:
      ipolicy_spindle_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  return ipolicy_out
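

# A minimal usage sketch for CreateIPolicyFromOpts (argument values invented;
# which argument combinations the callers actually pass lies outside this
# excerpt, so empty dicts are supplied for the unused ispec categories):
#
#   ipolicy = CreateIPolicyFromOpts(
#     ispecs_mem_size={"min": "128M", "max": "32G"},
#     ispecs_cpu_count={"min": 1, "max": 8},
#     ispecs_disk_count={}, ispecs_disk_size={}, ispecs_nic_count={},
#     ipolicy_vcpu_ratio=4.0,
#     fill_all=True)
#
# With fill_all=True (cluster-level policy), unset items such as the allowed
# disk templates fall back to constants.DISK_TEMPLATES and
# constants.IPOLICY_DEFAULTS.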