#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from ganeti.runtime import (GetClient)

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
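
# Illustrative sketch (not part of the original module): given an options
# object with tag_type set to a non-cluster kind, the helper pops the target
# name off the front of args. The node name below is a made-up example.
#
#   import optparse
#   opts = optparse.Values({"tag_type": constants.TAG_NODE})
#   args = ["node1.example.com", "mytag"]
#   _ExtractTagsObject(opts, args)
#   # -> (constants.TAG_NODE, "node1.example.com"); args is now ["mytag"]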


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """optparse's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
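
# Illustrative sketch: options declared with type="unit" are converted through
# utils.ParseUnit, so plain numbers and suffixed sizes can be mixed on the
# command line (OS_SIZE_OPT below documents the result as MiB):
#
#   check_unit(None, "--os-size", "4G")    # -> 4096 (assuming MiB-based units)
#   check_unit(None, "--os-size", "512")   # -> 512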


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
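
# Illustrative sketch of the prefix handling described above; values stay
# strings, only the "no_"/"-" prefixes are interpreted, and the option name
# ("--example") is a made-up placeholder used only for error messages:
#
#   _SplitKeyVal("--example", "mem=512,no_acpi,-kernel_path", True)
#   # -> {"mem": "512", "acpi": False, "kernel_path": None}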


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
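
# Illustrative sketch, reusing the "--disk" option name as an example:
#
#   _SplitIdentKeyVal("--disk", "0:size=10G,mode=rw", True)
#   # -> ("0", {"size": "10G", "mode": "rw"})
#   _SplitIdentKeyVal("--disk", "no_remote", True)
#   # -> ("remote", False), i.e. "remove this parameter group"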


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionaries
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
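
# Illustrative sketch of the nested format parsed above ("//" separates list
# entries, "/" separates idents inside one entry); "--example" is a made-up
# option name:
#
#   check_multilist_ident_key_val(None, "--example",
#                                 "disk:size=10G/nic:mode=bridged//disk:size=20G")
#   # -> [{"disk": {"size": "10G"}, "nic": {"mode": "bridged"}},
#   #     {"disk": {"size": "20G"}}]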


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
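
# Illustrative sketch: the literal value of constants.VALUE_DEFAULT is passed
# through unchanged (so a parameter can be reset to its default), anything
# else must parse as a float:
#
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", "2.5")                    # -> 2.5
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", constants.VALUE_DEFAULT)  # passed through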


# completion_suggest is normally a list. Numeric values that don't evaluate
# to False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
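
# Illustrative sketch (assumptions: "-X"/"--example" and dest "example" are
# made-up names) of how options built with cli_option plug into a standard
# OptionParser, including the custom "keyval" type defined above:
#
#   example_opt = cli_option("-X", "--example", dest="example",
#                            type="keyval", default={})
#   parser = OptionParser(option_list=[example_opt])
#   (opts, args) = parser.parse_args(["-X", "acpi,kernel_args=ro"])
#   # opts.example == {"acpi": True, "kernel_args": "ro"}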


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complte standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
1005
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible, fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
1105
                         " disrupt briefly the replication (like during the"
1106
                         " migration/failover")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          default=constants.IEC_NONE,
                          help="The compression mode to use",
                          choices=list(constants.IEC_ALL))

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new new cluster domain"
1413
                                             " secret from file"))
1414

    
1415
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1416
                                           dest="new_cluster_domain_secret",
1417
                                           default=False, action="store_true",
1418
                                           help=("Create a new cluster domain"
1419
                                                 " secret"))
1420

    
1421
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1422
                              dest="use_replication_network",
1423
                              help="Whether to use the replication network"
1424
                              " for talking to the nodes",
1425
                              action="store_true", default=False)
1426

    
1427
MAINTAIN_NODE_HEALTH_OPT = \
1428
    cli_option("--maintain-node-health", dest="maintain_node_health",
1429
               metavar=_YORNO, default=None, type="bool",
1430
               help="Configure the cluster to automatically maintain node"
1431
               " health, by shutting down unknown instances, shutting down"
1432
               " unknown DRBD devices, etc.")
1433

    
1434
IDENTIFY_DEFAULTS_OPT = \
1435
    cli_option("--identify-defaults", dest="identify_defaults",
1436
               default=False, action="store_true",
1437
               help="Identify which saved instance parameters are equal to"
1438
               " the current cluster defaults and set them as such, instead"
1439
               " of marking them as overridden")
1440

    
1441
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1442
                         action="store", dest="uid_pool",
1443
                         help=("A list of user-ids or user-id"
1444
                               " ranges separated by commas"))
1445

    
1446
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1447
                          action="store", dest="add_uids",
1448
                          help=("A list of user-ids or user-id"
1449
                                " ranges separated by commas, to be"
1450
                                " added to the user-id pool"))
1451

    
1452
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1453
                             action="store", dest="remove_uids",
1454
                             help=("A list of user-ids or user-id"
1455
                                   " ranges separated by commas, to be"
1456
                                   " removed from the user-id pool"))
1457

    
1458
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1459
                              action="store", dest="reserved_lvs",
1460
                              help=("A comma-separated list of reserved"
1461
                                    " logical volumes names, that will be"
1462
                                    " ignored by cluster verify"))
1463

    
1464
ROMAN_OPT = cli_option("--roman",
1465
                       dest="roman_integers", default=False,
1466
                       action="store_true",
1467
                       help="Use roman numbers for positive integers")
1468

    
1469
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1470
                             action="store", default=None,
1471
                             help="Specifies usermode helper for DRBD")
1472

    
1473
PRIMARY_IP_VERSION_OPT = \
1474
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1475
               action="store", dest="primary_ip_version",
1476
               metavar="%d|%d" % (constants.IP4_VERSION,
1477
                                  constants.IP6_VERSION),
1478
               help="Cluster-wide IP version for primary IP")
1479

    
1480
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1481
                              action="store_true",
1482
                              help="Show machine name for every line in output")
1483

    
1484
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1485
                              action="store_true",
1486
                              help=("Hide successful results and show failures"
1487
                                    " only (determined by the exit code)"))
1488

    
1489
REASON_OPT = cli_option("--reason", default=None,
1490
                        help="The reason for executing the command")
1491

    
1492

    
1493
def _PriorityOptionCb(option, _, value, parser):
1494
  """Callback for processing C{--priority} option.
1495

1496
  """
1497
  value = _PRIONAME_TO_VALUE[value]
1498

    
1499
  setattr(parser.values, option.dest, value)
1500

    
1501

    
1502
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1503
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1504
                          choices=_PRIONAME_TO_VALUE.keys(),
1505
                          action="callback", type="choice",
1506
                          callback=_PriorityOptionCb,
1507
                          help="Priority for opcode processing")
1508

    
1509
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1510
                        type="bool", default=None, metavar=_YORNO,
1511
                        help="Sets the hidden flag on the OS")
1512

    
1513
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1514
                        type="bool", default=None, metavar=_YORNO,
1515
                        help="Sets the blacklisted flag on the OS")
1516

    
1517
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1518
                                     type="bool", metavar=_YORNO,
1519
                                     dest="prealloc_wipe_disks",
1520
                                     help=("Wipe disks prior to instance"
1521
                                           " creation"))
1522

    
1523
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1524
                             type="keyval", default=None,
1525
                             help="Node parameters")
1526

    
1527
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1528
                              action="store", metavar="POLICY", default=None,
1529
                              help="Allocation policy for the node group")
1530

    
1531
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1532
                              type="bool", metavar=_YORNO,
1533
                              dest="node_powered",
1534
                              help="Specify if the SoR for node is powered")
1535

    
1536
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1537
                             default=constants.OOB_TIMEOUT,
1538
                             help="Maximum time to wait for out-of-band helper")
1539

    
1540
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1541
                             default=constants.OOB_POWER_DELAY,
1542
                             help="Time in seconds to wait between power-ons")
1543

    
1544
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1545
                              action="store_true", default=False,
1546
                              help=("Whether command argument should be treated"
1547
                                    " as filter"))
1548

    
1549
NO_REMEMBER_OPT = cli_option("--no-remember",
1550
                             dest="no_remember",
1551
                             action="store_true", default=False,
1552
                             help="Perform but do not record the change"
1553
                             " in the configuration")
1554

    
1555
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1556
                              default=False, action="store_true",
1557
                              help="Evacuate primary instances only")
1558

    
1559
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1560
                                default=False, action="store_true",
1561
                                help="Evacuate secondary instances only"
1562
                                     " (applies only to internally mirrored"
1563
                                     " disk templates, e.g. %s)" %
1564
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1565

    
1566
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1567
                                action="store_true", default=False,
1568
                                help="Pause instance at startup")
1569

    
1570
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1571
                          help="Destination node group (name or uuid)",
1572
                          default=None, action="append",
1573
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1574

    
1575
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1576
                               action="append", dest="ignore_errors",
1577
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1578
                               help="Error code to be ignored")
1579

    
1580
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1581
                            action="append",
1582
                            help=("Specify disk state information in the"
1583
                                  " format"
1584
                                  " storage_type/identifier:option=value,...;"
1585
                                  " note this is unused for now"),
1586
                            type="identkeyval")
1587

    
1588
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1589
                          action="append",
1590
                          help=("Specify hypervisor state information in the"
1591
                                " format hypervisor:option=value,...;"
1592
                                " note this is unused for now"),
1593
                          type="identkeyval")
1594

    
1595
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1596
                                action="store_true", default=False,
1597
                                help="Ignore instance policy violations")
1598

    
1599
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1600
                             help="Sets the instance's runtime memory,"
1601
                             " ballooning it up or down to the new value",
1602
                             default=None, type="unit", metavar="<size>")
1603

    
1604
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1605
                          action="store_true", default=False,
1606
                          help="Marks the grow as absolute instead of the"
1607
                          " (default) relative mode")
1608

    
1609
NETWORK_OPT = cli_option("--network",
1610
                         action="store", default=None, dest="network",
1611
                         help="IP network in CIDR notation")
1612

    
1613
GATEWAY_OPT = cli_option("--gateway",
1614
                         action="store", default=None, dest="gateway",
1615
                         help="IP address of the router (gateway)")
1616

    
1617
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1618
                                  action="store", default=None,
1619
                                  dest="add_reserved_ips",
1620
                                  help="Comma-separated list of"
1621
                                  " reserved IPs to add")
1622

    
1623
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1624
                                     action="store", default=None,
1625
                                     dest="remove_reserved_ips",
1626
                                     help="Comma-delimited list of"
1627
                                     " reserved IPs to remove")
1628

    
1629
NETWORK6_OPT = cli_option("--network6",
1630
                          action="store", default=None, dest="network6",
1631
                          help="IP network in CIDR notation")
1632

    
1633
GATEWAY6_OPT = cli_option("--gateway6",
1634
                          action="store", default=None, dest="gateway6",
1635
                          help="IP6 address of the router (gateway)")
1636

    
1637
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1638
                                  dest="conflicts_check",
1639
                                  default=True,
1640
                                  action="store_false",
1641
                                  help="Don't check for conflicting IPs")
1642

    
1643
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1644
                                 default=False, action="store_true",
1645
                                 help="Include default values")
1646

    
1647
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1648
                         action="store_true", default=False,
1649
                         help="Hotplug supported devices (NICs and Disks)")
1650

    
1651
#: Options provided by all commands
1652
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1653

    
1654
# options related to asynchronous job handling
1655

    
1656
SUBMIT_OPTS = [
1657
  SUBMIT_OPT,
1658
  PRINT_JOBID_OPT,
1659
  ]
1660

    
1661
# common options for creating instances; "add" and "import" then add their
# own specific ones.
1663
COMMON_CREATE_OPTS = [
1664
  BACKEND_OPT,
1665
  DISK_OPT,
1666
  DISK_TEMPLATE_OPT,
1667
  FILESTORE_DIR_OPT,
1668
  FILESTORE_DRIVER_OPT,
1669
  HYPERVISOR_OPT,
1670
  IALLOCATOR_OPT,
1671
  NET_OPT,
1672
  NODE_PLACEMENT_OPT,
1673
  NOIPCHECK_OPT,
1674
  NOCONFLICTSCHECK_OPT,
1675
  NONAMECHECK_OPT,
1676
  NONICS_OPT,
1677
  NWSYNC_OPT,
1678
  OSPARAMS_OPT,
1679
  OS_SIZE_OPT,
1680
  SUBMIT_OPT,
1681
  PRINT_JOBID_OPT,
1682
  TAG_ADD_OPT,
1683
  DRY_RUN_OPT,
1684
  PRIORITY_OPT,
1685
  ]
1686

    
1687
# common instance policy options
1688
INSTANCE_POLICY_OPTS = [
1689
  IPOLICY_BOUNDS_SPECS_OPT,
1690
  IPOLICY_DISK_TEMPLATES,
1691
  IPOLICY_VCPU_RATIO,
1692
  IPOLICY_SPINDLE_RATIO,
1693
  ]
1694

    
1695
# instance policy split specs options
1696
SPLIT_ISPECS_OPTS = [
1697
  SPECS_CPU_COUNT_OPT,
1698
  SPECS_DISK_COUNT_OPT,
1699
  SPECS_DISK_SIZE_OPT,
1700
  SPECS_MEM_SIZE_OPT,
1701
  SPECS_NIC_COUNT_OPT,
1702
  ]
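

# Example (illustrative only): the shared option lists above are meant to be
# concatenated with command-specific options when a gnt-* script builds its
# command table; the extra flag below is made up.
def _ExampleComposeOptions():
  """Example only: compose the option list for a hypothetical command."""
  example_opt = cli_option("--example-flag", dest="example_flag",
                           action="store_true", default=False,
                           help="A hypothetical command-specific flag")
  return COMMON_CREATE_OPTS + [example_opt]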
1703

    
1704

    
1705
class _ShowUsage(Exception):
1706
  """Exception class for L{_ParseArgs}.
1707

1708
  """
1709
  def __init__(self, exit_error):
1710
    """Initializes instances of this class.
1711

1712
    @type exit_error: bool
1713
    @param exit_error: Whether to report failure on exit
1714

1715
    """
1716
    Exception.__init__(self)
1717
    self.exit_error = exit_error
1718

    
1719

    
1720
class _ShowVersion(Exception):
1721
  """Exception class for L{_ParseArgs}.
1722

1723
  """
1724

    
1725

    
1726
def _ParseArgs(binary, argv, commands, aliases, env_override):
1727
  """Parser for the command line arguments.
1728

1729
  This function parses the arguments and returns the function which
1730
  must be executed together with its (modified) arguments.
1731

1732
  @param binary: Script name
1733
  @param argv: Command line arguments
1734
  @param commands: Dictionary containing command definitions
1735
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1736
  @param env_override: list of env variables allowed for default args
1737
  @raise _ShowUsage: If usage description should be shown
1738
  @raise _ShowVersion: If version should be shown
1739

1740
  """
1741
  assert not (env_override - set(commands))
1742
  assert not (set(aliases.keys()) & set(commands.keys()))
1743

    
1744
  if len(argv) > 1:
1745
    cmd = argv[1]
1746
  else:
1747
    # No option or command given
1748
    raise _ShowUsage(exit_error=True)
1749

    
1750
  if cmd == "--version":
1751
    raise _ShowVersion()
1752
  elif cmd == "--help":
1753
    raise _ShowUsage(exit_error=False)
1754
  elif not (cmd in commands or cmd in aliases):
1755
    raise _ShowUsage(exit_error=True)
1756

    
1757
  # get command, unalias it, and look it up in commands
1758
  if cmd in aliases:
1759
    if aliases[cmd] not in commands:
1760
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1761
                                   " command '%s'" % (cmd, aliases[cmd]))
1762

    
1763
    cmd = aliases[cmd]
1764

    
1765
  if cmd in env_override:
1766
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1767
    env_args = os.environ.get(args_env_name)
1768
    if env_args:
1769
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1770

    
1771
  func, args_def, parser_opts, usage, description = commands[cmd]
1772
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1773
                        description=description,
1774
                        formatter=TitledHelpFormatter(),
1775
                        usage="%%prog %s %s" % (cmd, usage))
1776
  parser.disable_interspersed_args()
1777
  options, args = parser.parse_args(args=argv[2:])
1778

    
1779
  if not _CheckArguments(cmd, args_def, args):
1780
    return None, None, None
1781

    
1782
  return func, options, args
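

# Example (illustrative only): with env_override, default arguments for a
# command can come from an environment variable whose name is derived from
# the binary and command name exactly as in _ParseArgs above.
def _ExampleEnvOverrideName(binary="gnt-instance", cmd="list"):
  """Example only: name of the env variable consulted for default args.

  For the defaults above this returns "GNT_INSTANCE_LIST".

  """
  return ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()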
1783

    
1784

    
1785
def _FormatUsage(binary, commands):
1786
  """Generates a nice description of all commands.
1787

1788
  @param binary: Script name
1789
  @param commands: Dictionary containing command definitions
1790

1791
  """
1792
  # compute the maximum width of the command name column
1793
  mlen = min(60, max(map(len, commands)))
1794

    
1795
  yield "Usage: %s {command} [options...] [argument...]" % binary
1796
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1797
  yield ""
1798
  yield "Commands:"
1799

    
1800
  # and format a nice command list
1801
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1802
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1803
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1804
    for line in help_lines:
1805
      yield " %-*s   %s" % (mlen, "", line)
1806

    
1807
  yield ""
1808

    
1809

    
1810
def _CheckArguments(cmd, args_def, args):
1811
  """Verifies the arguments using the argument definition.
1812

1813
  Algorithm:
1814

1815
    1. Abort with error if values specified by user but none expected.
1816

1817
    1. For each argument in definition
1818

1819
      1. Keep running count of minimum number of values (min_count)
1820
      1. Keep running count of maximum number of values (max_count)
1821
      1. If it has an unlimited number of values
1822

1823
        1. Abort with error if it's not the last argument in the definition
1824

1825
    1. If last argument has limited number of values
1826

1827
      1. Abort with error if number of values doesn't match or is too large
1828

1829
    1. Abort with error if user didn't pass enough values (min_count)
1830

1831
  """
1832
  if args and not args_def:
1833
    ToStderr("Error: Command %s expects no arguments", cmd)
1834
    return False
1835

    
1836
  min_count = None
1837
  max_count = None
1838
  check_max = None
1839

    
1840
  last_idx = len(args_def) - 1
1841

    
1842
  for idx, arg in enumerate(args_def):
1843
    if min_count is None:
1844
      min_count = arg.min
1845
    elif arg.min is not None:
1846
      min_count += arg.min
1847

    
1848
    if max_count is None:
1849
      max_count = arg.max
1850
    elif arg.max is not None:
1851
      max_count += arg.max
1852

    
1853
    if idx == last_idx:
1854
      check_max = (arg.max is not None)
1855

    
1856
    elif arg.max is None:
1857
      raise errors.ProgrammerError("Only the last argument can have max=None")
1858

    
1859
  if check_max:
1860
    # Command with exact number of arguments
1861
    if (min_count is not None and max_count is not None and
1862
        min_count == max_count and len(args) != min_count):
1863
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1864
      return False
1865

    
1866
    # Command with limited number of arguments
1867
    if max_count is not None and len(args) > max_count:
1868
      ToStderr("Error: Command %s expects only %d argument(s)",
1869
               cmd, max_count)
1870
      return False
1871

    
1872
  # Command with some required arguments
1873
  if min_count is not None and len(args) < min_count:
1874
    ToStderr("Error: Command %s expects at least %d argument(s)",
1875
             cmd, min_count)
1876
    return False
1877

    
1878
  return True
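

# Example (illustrative only): _CheckArguments only looks at the .min and
# .max attributes of each argument definition, so a minimal stand-in object
# is enough to show which argument counts are accepted.
class _ExampleArg:
  """Example only: minimal argument definition with min/max attributes."""
  def __init__(self, min_count=0, max_count=None):
    self.min = min_count
    self.max = max_count


def _ExampleCheckArguments():
  """Example only: one mandatory argument followed by optional ones.

  Returns (True, False): a single argument is accepted, no arguments are
  rejected (with an error printed to stderr).

  """
  args_def = [_ExampleArg(min_count=1, max_count=1),
              _ExampleArg(min_count=0, max_count=None)]
  return (_CheckArguments("example-cmd", args_def, ["inst1"]),
          _CheckArguments("example-cmd", args_def, []))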
1879

    
1880

    
1881
def SplitNodeOption(value):
1882
  """Splits the value of a --node option.
1883

1884
  """
1885
  if value and ":" in value:
1886
    return value.split(":", 1)
1887
  else:
1888
    return (value, None)
1889

    
1890

    
1891
def CalculateOSNames(os_name, os_variants):
1892
  """Calculates all the names an OS can be called, according to its variants.
1893

1894
  @type os_name: string
1895
  @param os_name: base name of the os
1896
  @type os_variants: list or None
1897
  @param os_variants: list of supported variants
1898
  @rtype: list
1899
  @return: list of valid names
1900

1901
  """
1902
  if os_variants:
1903
    return ["%s+%s" % (os_name, v) for v in os_variants]
1904
  else:
1905
    return [os_name]
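

# Example (illustrative only); the OS name and variant names are made up.
def _ExampleCalculateOSNames():
  """Example only: an OS with variants yields one name per variant.

  Returns (["dummyos+default", "dummyos+minimal"], ["dummyos"]).

  """
  return (CalculateOSNames("dummyos", ["default", "minimal"]),
          CalculateOSNames("dummyos", None))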
1906

    
1907

    
1908
def ParseFields(selected, default):
1909
  """Parses the values of "--field"-like options.
1910

1911
  @type selected: string or None
1912
  @param selected: User-selected options
1913
  @type default: list
1914
  @param default: Default fields
1915

1916
  """
1917
  if selected is None:
1918
    return default
1919

    
1920
  if selected.startswith("+"):
1921
    return default + selected[1:].split(",")
1922

    
1923
  return selected.split(",")
1924

    
1925

    
1926
UsesRPC = rpc.RunWithRPC
1927

    
1928

    
1929
def AskUser(text, choices=None):
1930
  """Ask the user a question.
1931

1932
  @param text: the question to ask
1933

1934
  @param choices: list with elements tuples (input_char, return_value,
1935
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1937
      note that the '?' char is reserved for help
1938

1939
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list
1942

1943
  """
1944
  if choices is None:
1945
    choices = [("y", True, "Perform the operation"),
1946
               ("n", False, "Do not perform the operation")]
1947
  if not choices or not isinstance(choices, list):
1948
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1949
  for entry in choices:
1950
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1951
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1952

    
1953
  answer = choices[-1][1]
1954
  new_text = []
1955
  for line in text.splitlines():
1956
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1957
  text = "\n".join(new_text)
1958
  try:
1959
    f = file("/dev/tty", "a+")
1960
  except IOError:
1961
    return answer
1962
  try:
1963
    chars = [entry[0] for entry in choices]
1964
    chars[-1] = "[%s]" % chars[-1]
1965
    chars.append("?")
1966
    maps = dict([(entry[0], entry[1]) for entry in choices])
1967
    while True:
1968
      f.write(text)
1969
      f.write("\n")
1970
      f.write("/".join(chars))
1971
      f.write(": ")
1972
      line = f.readline(2).strip().lower()
1973
      if line in maps:
1974
        answer = maps[line]
1975
        break
1976
      elif line == "?":
1977
        for entry in choices:
1978
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1979
        f.write("\n")
1980
        continue
1981
  finally:
1982
    f.close()
1983
  return answer
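

# Example (illustrative only): AskUser with a custom choices list; remember
# that the last entry is also the default when no tty is available.
def _ExampleAskUser():
  """Example only: three-way question using AskUser."""
  choices = [
    ("r", "retry", "Retry the operation"),
    ("s", "skip", "Skip this item"),
    ("a", "abort", "Abort the whole run"),
    ]
  return AskUser("The operation failed. What should be done?", choices)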
1984

    
1985

    
1986
class JobSubmittedException(Exception):
1987
  """Job was submitted, client should exit.
1988

1989
  This exception has one argument, the ID of the job that was
1990
  submitted. The handler should print this ID.
1991

1992
  This is not an error, just a structured way to exit from clients.
1993

1994
  """
1995

    
1996

    
1997
def SendJob(ops, cl=None):
1998
  """Function to submit an opcode without waiting for the results.
1999

2000
  @type ops: list
2001
  @param ops: list of opcodes
2002
  @type cl: luxi.Client
2003
  @param cl: the luxi client to use for communicating with the master;
2004
             if None, a new client will be created
2005

2006
  """
2007
  if cl is None:
2008
    cl = GetClient()
2009

    
2010
  job_id = cl.SubmitJob(ops)
2011

    
2012
  return job_id
2013

    
2014

    
2015
def GenericPollJob(job_id, cbs, report_cbs):
2016
  """Generic job-polling function.
2017

2018
  @type job_id: number
2019
  @param job_id: Job ID
2020
  @type cbs: Instance of L{JobPollCbBase}
2021
  @param cbs: Data callbacks
2022
  @type report_cbs: Instance of L{JobPollReportCbBase}
2023
  @param report_cbs: Reporting callbacks
2024

2025
  """
2026
  prev_job_info = None
2027
  prev_logmsg_serial = None
2028

    
2029
  status = None
2030

    
2031
  while True:
2032
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2033
                                      prev_logmsg_serial)
2034
    if not result:
2035
      # job not found, go away!
2036
      raise errors.JobLost("Job with id %s lost" % job_id)
2037

    
2038
    if result == constants.JOB_NOTCHANGED:
2039
      report_cbs.ReportNotChanged(job_id, status)
2040

    
2041
      # Wait again
2042
      continue
2043

    
2044
    # Split result, a tuple of (field values, log entries)
2045
    (job_info, log_entries) = result
2046
    (status, ) = job_info
2047

    
2048
    if log_entries:
2049
      for log_entry in log_entries:
2050
        (serial, timestamp, log_type, message) = log_entry
2051
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2052
                                    log_type, message)
2053
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2054

    
2055
    # TODO: Handle canceled and archived jobs
2056
    elif status in (constants.JOB_STATUS_SUCCESS,
2057
                    constants.JOB_STATUS_ERROR,
2058
                    constants.JOB_STATUS_CANCELING,
2059
                    constants.JOB_STATUS_CANCELED):
2060
      break
2061

    
2062
    prev_job_info = job_info
2063

    
2064
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2065
  if not jobs:
2066
    raise errors.JobLost("Job with id %s lost" % job_id)
2067

    
2068
  status, opstatus, result = jobs[0]
2069

    
2070
  if status == constants.JOB_STATUS_SUCCESS:
2071
    return result
2072

    
2073
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2074
    raise errors.OpExecError("Job was canceled")
2075

    
2076
  has_ok = False
2077
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2078
    if status == constants.OP_STATUS_SUCCESS:
2079
      has_ok = True
2080
    elif status == constants.OP_STATUS_ERROR:
2081
      errors.MaybeRaise(msg)
2082

    
2083
      if has_ok:
2084
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2085
                                 (idx, msg))
2086

    
2087
      raise errors.OpExecError(str(msg))
2088

    
2089
  # default failure mode
2090
  raise errors.OpExecError(result)
2091

    
2092

    
2093
class JobPollCbBase:
2094
  """Base class for L{GenericPollJob} callbacks.
2095

2096
  """
2097
  def __init__(self):
2098
    """Initializes this class.
2099

2100
    """
2101

    
2102
  def WaitForJobChangeOnce(self, job_id, fields,
2103
                           prev_job_info, prev_log_serial):
2104
    """Waits for changes on a job.
2105

2106
    """
2107
    raise NotImplementedError()
2108

    
2109
  def QueryJobs(self, job_ids, fields):
2110
    """Returns the selected fields for the selected job IDs.
2111

2112
    @type job_ids: list of numbers
2113
    @param job_ids: Job IDs
2114
    @type fields: list of strings
2115
    @param fields: Fields
2116

2117
    """
2118
    raise NotImplementedError()
2119

    
2120

    
2121
class JobPollReportCbBase:
2122
  """Base class for L{GenericPollJob} reporting callbacks.
2123

2124
  """
2125
  def __init__(self):
2126
    """Initializes this class.
2127

2128
    """
2129

    
2130
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2131
    """Handles a log message.
2132

2133
    """
2134
    raise NotImplementedError()
2135

    
2136
  def ReportNotChanged(self, job_id, status):
2137
    """Called for if a job hasn't changed in a while.
2138

2139
    @type job_id: number
2140
    @param job_id: Job ID
2141
    @type status: string or None
2142
    @param status: Job status if available
2143

2144
    """
2145
    raise NotImplementedError()
2146

    
2147

    
2148
class _LuxiJobPollCb(JobPollCbBase):
2149
  def __init__(self, cl):
2150
    """Initializes this class.
2151

2152
    """
2153
    JobPollCbBase.__init__(self)
2154
    self.cl = cl
2155

    
2156
  def WaitForJobChangeOnce(self, job_id, fields,
2157
                           prev_job_info, prev_log_serial):
2158
    """Waits for changes on a job.
2159

2160
    """
2161
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2162
                                        prev_job_info, prev_log_serial)
2163

    
2164
  def QueryJobs(self, job_ids, fields):
2165
    """Returns the selected fields for the selected job IDs.
2166

2167
    """
2168
    return self.cl.QueryJobs(job_ids, fields)
2169

    
2170

    
2171
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2172
  def __init__(self, feedback_fn):
2173
    """Initializes this class.
2174

2175
    """
2176
    JobPollReportCbBase.__init__(self)
2177

    
2178
    self.feedback_fn = feedback_fn
2179

    
2180
    assert callable(feedback_fn)
2181

    
2182
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2183
    """Handles a log message.
2184

2185
    """
2186
    self.feedback_fn((timestamp, log_type, log_msg))
2187

    
2188
  def ReportNotChanged(self, job_id, status):
2189
    """Called if a job hasn't changed in a while.
2190

2191
    """
2192
    # Ignore
2193

    
2194

    
2195
class StdioJobPollReportCb(JobPollReportCbBase):
2196
  def __init__(self):
2197
    """Initializes this class.
2198

2199
    """
2200
    JobPollReportCbBase.__init__(self)
2201

    
2202
    self.notified_queued = False
2203
    self.notified_waitlock = False
2204

    
2205
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2206
    """Handles a log message.
2207

2208
    """
2209
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2210
             FormatLogMessage(log_type, log_msg))
2211

    
2212
  def ReportNotChanged(self, job_id, status):
2213
    """Called if a job hasn't changed in a while.
2214

2215
    """
2216
    if status is None:
2217
      return
2218

    
2219
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2220
      ToStderr("Job %s is waiting in queue", job_id)
2221
      self.notified_queued = True
2222

    
2223
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2224
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2225
      self.notified_waitlock = True
2226

    
2227

    
2228
def FormatLogMessage(log_type, log_msg):
2229
  """Formats a job message according to its type.
2230

2231
  """
2232
  if log_type != constants.ELOG_MESSAGE:
2233
    log_msg = str(log_msg)
2234

    
2235
  return utils.SafeEncode(log_msg)
2236

    
2237

    
2238
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2239
  """Function to poll for the result of a job.
2240

2241
  @type job_id: job identifier
2242
  @param job_id: the job to poll for results
2243
  @type cl: luxi.Client
2244
  @param cl: the luxi client to use for communicating with the master;
2245
             if None, a new client will be created
2246

2247
  """
2248
  if cl is None:
2249
    cl = GetClient()
2250

    
2251
  if reporter is None:
2252
    if feedback_fn:
2253
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2254
    else:
2255
      reporter = StdioJobPollReportCb()
2256
  elif feedback_fn:
2257
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2258

    
2259
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
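

# Example (illustrative only): the asynchronous submit-then-poll pattern
# used by the helpers above; "op" stands for any opcode instance.
def _ExampleSendAndPoll(op):
  """Example only: submit a single opcode and wait for its result.

  This is essentially what SubmitOpCode does, split into its two steps.

  """
  cl = GetClient()
  job_id = SendJob([op], cl=cl)
  # wait until the job finishes and return the first opcode's result
  return PollJob(job_id, cl=cl)[0]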
2260

    
2261

    
2262
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2263
  """Legacy function to submit an opcode.
2264

2265
  This is just a simple wrapper over the construction of the processor
2266
  instance. It should be extended to better handle feedback and
2267
  interaction functions.
2268

2269
  """
2270
  if cl is None:
2271
    cl = GetClient()
2272

    
2273
  SetGenericOpcodeOpts([op], opts)
2274

    
2275
  job_id = SendJob([op], cl=cl)
2276
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2277
    ToStdout("%d" % job_id)
2278

    
2279
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2280
                       reporter=reporter)
2281

    
2282
  return op_results[0]
2283

    
2284

    
2285
def SubmitOpCodeToDrainedQueue(op):
2286
  """Forcefully insert a job in the queue, even if it is drained.
2287

2288
  """
2289
  cl = GetClient()
2290
  job_id = cl.SubmitJobToDrainedQueue([op])
2291
  op_results = PollJob(job_id, cl=cl)
2292
  return op_results[0]
2293

    
2294

    
2295
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2296
  """Wrapper around SubmitOpCode or SendJob.
2297

2298
  This function will decide, based on the 'opts' parameter, whether to
2299
  submit and wait for the result of the opcode (and return it), or
2300
  whether to just send the job and print its identifier. It is used in
2301
  order to simplify the implementation of the '--submit' option.
2302

2303
  It will also process the opcodes if we're sending them via SendJob
2304
  (otherwise SubmitOpCode does it).
2305

2306
  """
2307
  if opts and opts.submit_only:
2308
    job = [op]
2309
    SetGenericOpcodeOpts(job, opts)
2310
    job_id = SendJob(job, cl=cl)
2311
    if opts.print_jobid:
2312
      ToStdout("%d" % job_id)
2313
    raise JobSubmittedException(job_id)
2314
  else:
2315
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2316

    
2317

    
2318
def _InitReasonTrail(op, opts):
2319
  """Builds the first part of the reason trail
2320

2321
  Builds the initial part of the reason trail, adding the user provided reason
2322
  (if it exists) and the name of the command starting the operation.
2323

2324
  @param op: the opcode the reason trail will be added to
2325
  @param opts: the command line options selected by the user
2326

2327
  """
2328
  assert len(sys.argv) >= 2
2329
  trail = []
2330

    
2331
  if opts.reason:
2332
    trail.append((constants.OPCODE_REASON_SRC_USER,
2333
                  opts.reason,
2334
                  utils.EpochNano()))
2335

    
2336
  binary = os.path.basename(sys.argv[0])
2337
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2338
  command = sys.argv[1]
2339
  trail.append((source, command, utils.EpochNano()))
2340
  op.reason = trail
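

# Example (illustrative only): for a call like
# "gnt-instance reboot --reason=maintenance inst1" the trail built above has
# the shape (constant values and timestamps shown symbolically):
#   [(constants.OPCODE_REASON_SRC_USER, "maintenance", <epoch ns>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "reboot",
#     <epoch ns>)]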
2341

    
2342

    
2343
def SetGenericOpcodeOpts(opcode_list, options):
2344
  """Processor for generic options.
2345

2346
  This function updates the given opcodes based on generic command
2347
  line options (like debug, dry-run, etc.).
2348

2349
  @param opcode_list: list of opcodes
2350
  @param options: command line options or None
2351
  @return: None (in-place modification)
2352

2353
  """
2354
  if not options:
2355
    return
2356
  for op in opcode_list:
2357
    op.debug_level = options.debug
2358
    if hasattr(options, "dry_run"):
2359
      op.dry_run = options.dry_run
2360
    if getattr(options, "priority", None) is not None:
2361
      op.priority = options.priority
2362
    _InitReasonTrail(op, options)
2363

    
2364

    
2365
def FormatError(err):
2366
  """Return a formatted error message for a given error.
2367

2368
  This function takes an exception instance and returns a tuple
2369
  consisting of two values: first, the recommended exit code, and
2370
  second, a string describing the error message (not
2371
  newline-terminated).
2372

2373
  """
2374
  retcode = 1
2375
  obuf = StringIO()
2376
  msg = str(err)
2377
  if isinstance(err, errors.ConfigurationError):
2378
    txt = "Corrupt configuration file: %s" % msg
2379
    logging.error(txt)
2380
    obuf.write(txt + "\n")
2381
    obuf.write("Aborting.")
2382
    retcode = 2
2383
  elif isinstance(err, errors.HooksAbort):
2384
    obuf.write("Failure: hooks execution failed:\n")
2385
    for node, script, out in err.args[0]:
2386
      if out:
2387
        obuf.write("  node: %s, script: %s, output: %s\n" %
2388
                   (node, script, out))
2389
      else:
2390
        obuf.write("  node: %s, script: %s (no output)\n" %
2391
                   (node, script))
2392
  elif isinstance(err, errors.HooksFailure):
2393
    obuf.write("Failure: hooks general failure: %s" % msg)
2394
  elif isinstance(err, errors.ResolverError):
2395
    this_host = netutils.Hostname.GetSysName()
2396
    if err.args[0] == this_host:
2397
      msg = "Failure: can't resolve my own hostname ('%s')"
2398
    else:
2399
      msg = "Failure: can't resolve hostname '%s'"
2400
    obuf.write(msg % err.args[0])
2401
  elif isinstance(err, errors.OpPrereqError):
2402
    if len(err.args) == 2:
2403
      obuf.write("Failure: prerequisites not met for this"
2404
                 " operation:\nerror type: %s, error details:\n%s" %
2405
                 (err.args[1], err.args[0]))
2406
    else:
2407
      obuf.write("Failure: prerequisites not met for this"
2408
                 " operation:\n%s" % msg)
2409
  elif isinstance(err, errors.OpExecError):
2410
    obuf.write("Failure: command execution error:\n%s" % msg)
2411
  elif isinstance(err, errors.TagError):
2412
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2413
  elif isinstance(err, errors.JobQueueDrainError):
2414
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2415
               " accept new requests\n")
2416
  elif isinstance(err, errors.JobQueueFull):
2417
    obuf.write("Failure: the job queue is full and doesn't accept new"
2418
               " job submissions until old jobs are archived\n")
2419
  elif isinstance(err, errors.TypeEnforcementError):
2420
    obuf.write("Parameter Error: %s" % msg)
2421
  elif isinstance(err, errors.ParameterError):
2422
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2423
  elif isinstance(err, luxi.NoMasterError):
2424
    if err.args[0] == pathutils.MASTER_SOCKET:
2425
      daemon = "the master daemon"
2426
    elif err.args[0] == pathutils.QUERY_SOCKET:
2427
      daemon = "the config daemon"
2428
    else:
2429
      daemon = "socket '%s'" % str(err.args[0])
2430
    obuf.write("Cannot communicate with %s.\nIs the process running"
2431
               " and listening for connections?" % daemon)
2432
  elif isinstance(err, luxi.TimeoutError):
2433
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2434
               " been submitted and will continue to run even if the call"
2435
               " timed out. Useful commands in this situation are \"gnt-job"
2436
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2437
    obuf.write(msg)
2438
  elif isinstance(err, luxi.PermissionError):
2439
    obuf.write("It seems you don't have permissions to connect to the"
2440
               " master daemon.\nPlease retry as a different user.")
2441
  elif isinstance(err, luxi.ProtocolError):
2442
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2443
               "%s" % msg)
2444
  elif isinstance(err, errors.JobLost):
2445
    obuf.write("Error checking job status: %s" % msg)
2446
  elif isinstance(err, errors.QueryFilterParseError):
2447
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2448
    obuf.write("\n".join(err.GetDetails()))
2449
  elif isinstance(err, errors.GenericError):
2450
    obuf.write("Unhandled Ganeti error: %s" % msg)
2451
  elif isinstance(err, JobSubmittedException):
2452
    obuf.write("JobID: %s\n" % err.args[0])
2453
    retcode = 0
2454
  else:
2455
    obuf.write("Unhandled exception: %s" % msg)
2456
  return retcode, obuf.getvalue().rstrip("\n")
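

# Example (illustrative only): how a caller turns an exception into an exit
# code plus a printable message; the error text is made up.
def _ExampleFormatError():
  """Example only: format an OpPrereqError for the user."""
  try:
    raise errors.OpPrereqError("instance not stopped", errors.ECODE_INVAL)
  except errors.OpPrereqError, err:
    (retcode, err_msg) = FormatError(err)
    ToStderr(err_msg)
    return retcode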
2457

    
2458

    
2459
def GenericMain(commands, override=None, aliases=None,
2460
                env_override=frozenset()):
2461
  """Generic main function for all the gnt-* commands.
2462

2463
  @param commands: a dictionary with a special structure, see the design doc
2464
                   for command line handling.
2465
  @param override: if not None, we expect a dictionary with keys that will
2466
                   override command line options; this can be used to pass
2467
                   options from the scripts to generic functions
2468
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2469
  @param env_override: list of environment names which are allowed to submit
2470
                       default args for commands
2471

2472
  """
2473
  # save the program name and the entire command line for later logging
2474
  if sys.argv:
2475
    binary = os.path.basename(sys.argv[0])
2476
    if not binary:
2477
      binary = sys.argv[0]
2478

    
2479
    if len(sys.argv) >= 2:
2480
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2481
    else:
2482
      logname = binary
2483

    
2484
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2485
  else:
2486
    binary = "<unknown program>"
2487
    cmdline = "<unknown>"
2488

    
2489
  if aliases is None:
2490
    aliases = {}
2491

    
2492
  try:
2493
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2494
                                       env_override)
2495
  except _ShowVersion:
2496
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2497
             constants.RELEASE_VERSION)
2498
    return constants.EXIT_SUCCESS
2499
  except _ShowUsage, err:
2500
    for line in _FormatUsage(binary, commands):
2501
      ToStdout(line)
2502

    
2503
    if err.exit_error:
2504
      return constants.EXIT_FAILURE
2505
    else:
2506
      return constants.EXIT_SUCCESS
2507
  except errors.ParameterError, err:
2508
    result, err_msg = FormatError(err)
2509
    ToStderr(err_msg)
2510
    return 1
2511

    
2512
  if func is None: # parse error
2513
    return 1
2514

    
2515
  if override is not None:
2516
    for key, val in override.iteritems():
2517
      setattr(options, key, val)
2518

    
2519
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2520
                     stderr_logging=True)
2521

    
2522
  logging.info("Command line: %s", cmdline)
2523

    
2524
  try:
2525
    result = func(options, args)
2526
  except (errors.GenericError, luxi.ProtocolError,
2527
          JobSubmittedException), err:
2528
    result, err_msg = FormatError(err)
2529
    logging.exception("Error during command processing")
2530
    ToStderr(err_msg)
2531
  except KeyboardInterrupt:
2532
    result = constants.EXIT_FAILURE
2533
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2534
             " might have been submitted and"
2535
             " will continue to run in the background.")
2536
  except IOError, err:
2537
    if err.errno == errno.EPIPE:
2538
      # our terminal went away, we'll exit
2539
      sys.exit(constants.EXIT_FAILURE)
2540
    else:
2541
      raise
2542

    
2543
  return result
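

# Example (illustrative only): the "commands" dictionary expected by
# GenericMain maps each command name to a 5-tuple of (function, argument
# definitions, extra options, usage, description), as unpacked in _ParseArgs
# above.  The command, function and flag below are made up.
def _ExampleCommandTable():
  """Example only: a minimal command table for a gnt-* style script.

  A script would typically end with something like
  C{sys.exit(GenericMain(commands))}.

  """
  def _DoExample(opts, args):
    ToStdout("verbose=%s args=%s", opts.example_verbose, args)
    return constants.EXIT_SUCCESS

  verbose_opt = cli_option("--example-verbose", dest="example_verbose",
                           action="store_true", default=False,
                           help="A hypothetical flag")
  return {
    "example": (_DoExample, [], [verbose_opt],
                "[--example-verbose]", "Run the example command"),
    }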
2544

    
2545

    
2546
def ParseNicOption(optvalue):
2547
  """Parses the value of the --net option(s).
2548

2549
  """
2550
  try:
2551
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2552
  except (TypeError, ValueError), err:
2553
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2554
                               errors.ECODE_INVAL)
2555

    
2556
  nics = [{}] * nic_max
2557
  for nidx, ndict in optvalue:
2558
    nidx = int(nidx)
2559

    
2560
    if not isinstance(ndict, dict):
2561
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2562
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2563

    
2564
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2565

    
2566
    nics[nidx] = ndict
2567

    
2568
  return nics
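

# Example (illustrative only): the value of repeated --net options arrives
# as (index, dict) pairs; indices may be sparse and untouched slots become
# empty dicts.  The NIC parameter names used below are just an illustration.
def _ExampleParseNicOption():
  """Example only: two NICs given out of order, NIC 1 left at defaults.

  Returns [{"ip": "192.0.2.10"}, {}, {"link": "br0"}].

  """
  return ParseNicOption([("2", {"link": "br0"}),
                         ("0", {"ip": "192.0.2.10"})])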
2569

    
2570

    
2571
def GenericInstanceCreate(mode, opts, args):
2572
  """Add an instance to the cluster via either creation or import.
2573

2574
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2575
  @param opts: the command line options selected by the user
2576
  @type args: list
2577
  @param args: should contain only one element, the new instance name
2578
  @rtype: int
2579
  @return: the desired exit code
2580

2581
  """
2582
  instance = args[0]
2583

    
2584
  (pnode, snode) = SplitNodeOption(opts.node)
2585

    
2586
  hypervisor = None
2587
  hvparams = {}
2588
  if opts.hypervisor:
2589
    hypervisor, hvparams = opts.hypervisor
2590

    
2591
  if opts.nics:
2592
    nics = ParseNicOption(opts.nics)
2593
  elif opts.no_nics:
2594
    # no nics
2595
    nics = []
2596
  elif mode == constants.INSTANCE_CREATE:
2597
    # default of one nic, all auto
2598
    nics = [{}]
2599
  else:
2600
    # mode == import
2601
    nics = []
2602

    
2603
  if opts.disk_template == constants.DT_DISKLESS:
2604
    if opts.disks or opts.sd_size is not None:
2605
      raise errors.OpPrereqError("Diskless instance but disk"
2606
                                 " information passed", errors.ECODE_INVAL)
2607
    disks = []
2608
  else:
2609
    if (not opts.disks and not opts.sd_size
2610
        and mode == constants.INSTANCE_CREATE):
2611
      raise errors.OpPrereqError("No disk information specified",
2612
                                 errors.ECODE_INVAL)
2613
    if opts.disks and opts.sd_size is not None:
2614
      raise errors.OpPrereqError("Please use either the '--disk' or"
2615
                                 " '-s' option", errors.ECODE_INVAL)
2616
    if opts.sd_size is not None:
2617
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2618

    
2619
    if opts.disks:
2620
      try:
2621
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2622
      except ValueError, err:
2623
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2624
                                   errors.ECODE_INVAL)
2625
      disks = [{}] * disk_max
2626
    else:
2627
      disks = []
2628
    for didx, ddict in opts.disks:
2629
      didx = int(didx)
2630
      if not isinstance(ddict, dict):
2631
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2632
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2633
      elif constants.IDISK_SIZE in ddict:
2634
        if constants.IDISK_ADOPT in ddict:
2635
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2636
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2637
        try:
2638
          ddict[constants.IDISK_SIZE] = \
2639
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2640
        except ValueError, err:
2641
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2642
                                     (didx, err), errors.ECODE_INVAL)
2643
      elif constants.IDISK_ADOPT in ddict:
2644
        if constants.IDISK_SPINDLES in ddict:
2645
          raise errors.OpPrereqError("spindles is not a valid option when"
2646
                                     " adopting a disk", errors.ECODE_INVAL)
2647
        if mode == constants.INSTANCE_IMPORT:
2648
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2649
                                     " import", errors.ECODE_INVAL)
2650
        ddict[constants.IDISK_SIZE] = 0
2651
      else:
2652
        raise errors.OpPrereqError("Missing size or adoption source for"
2653
                                   " disk %d" % didx, errors.ECODE_INVAL)
2654
      disks[didx] = ddict
2655

    
2656
  if opts.tags is not None:
2657
    tags = opts.tags.split(",")
2658
  else:
2659
    tags = []
2660

    
2661
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2662
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2663

    
2664
  if mode == constants.INSTANCE_CREATE:
2665
    start = opts.start
2666
    os_type = opts.os
2667
    force_variant = opts.force_variant
2668
    src_node = None
2669
    src_path = None
2670
    no_install = opts.no_install
2671
    identify_defaults = False
2672
    compress = constants.IEC_NONE
2673
  elif mode == constants.INSTANCE_IMPORT:
2674
    start = False
2675
    os_type = None
2676
    force_variant = False
2677
    src_node = opts.src_node
2678
    src_path = opts.src_dir
2679
    no_install = None
2680
    identify_defaults = opts.identify_defaults
2681
    compress = opts.compress
2682
  else:
2683
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2684

    
2685
  op = opcodes.OpInstanceCreate(instance_name=instance,
2686
                                disks=disks,
2687
                                disk_template=opts.disk_template,
2688
                                nics=nics,
2689
                                conflicts_check=opts.conflicts_check,
2690
                                pnode=pnode, snode=snode,
2691
                                ip_check=opts.ip_check,
2692
                                name_check=opts.name_check,
2693
                                wait_for_sync=opts.wait_for_sync,
2694
                                file_storage_dir=opts.file_storage_dir,
2695
                                file_driver=opts.file_driver,
2696
                                iallocator=opts.iallocator,
2697
                                hypervisor=hypervisor,
2698
                                hvparams=hvparams,
2699
                                beparams=opts.beparams,
2700
                                osparams=opts.osparams,
2701
                                mode=mode,
2702
                                start=start,
2703
                                os_type=os_type,
2704
                                force_variant=force_variant,
2705
                                src_node=src_node,
2706
                                src_path=src_path,
2707
                                compress=compress,
2708
                                tags=tags,
2709
                                no_install=no_install,
2710
                                identify_defaults=identify_defaults,
2711
                                ignore_ipolicy=opts.ignore_ipolicy)
2712

    
2713
  SubmitOrSend(op, opts)
2714
  return 0
2715

    
2716

    
2717
class _RunWhileClusterStoppedHelper:
2718
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2719

2720
  """
2721
  def __init__(self, feedback_fn, cluster_name, master_node,
2722
               online_nodes, ssh_ports):
2723
    """Initializes this class.
2724

2725
    @type feedback_fn: callable
2726
    @param feedback_fn: Feedback function
2727
    @type cluster_name: string
2728
    @param cluster_name: Cluster name
2729
    @type master_node: string
2730
    @param master_node: Master node name
2731
    @type online_nodes: list
2732
    @param online_nodes: List of names of online nodes
2733
    @type ssh_ports: list
2734
    @param ssh_ports: List of SSH ports of online nodes
2735

2736
    """
2737
    self.feedback_fn = feedback_fn
2738
    self.cluster_name = cluster_name
2739
    self.master_node = master_node
2740
    self.online_nodes = online_nodes
2741
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))
2742

    
2743
    self.ssh = ssh.SshRunner(self.cluster_name)
2744

    
2745
    self.nonmaster_nodes = [name for name in online_nodes
2746
                            if name != master_node]
2747

    
2748
    assert self.master_node not in self.nonmaster_nodes
2749

    
2750
  def _RunCmd(self, node_name, cmd):
2751
    """Runs a command on the local or a remote machine.
2752

2753
    @type node_name: string
2754
    @param node_name: Machine name
2755
    @type cmd: list
2756
    @param cmd: Command
2757

2758
    """
2759
    if node_name is None or node_name == self.master_node:
2760
      # No need to use SSH
2761
      result = utils.RunCmd(cmd)
2762
    else:
2763
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2764
                            utils.ShellQuoteArgs(cmd),
2765
                            port=self.ssh_ports[node_name])
2766

    
2767
    if result.failed:
2768
      errmsg = ["Failed to run command %s" % result.cmd]
2769
      if node_name:
2770
        errmsg.append("on node %s" % node_name)
2771
      errmsg.append(": exitcode %s and error %s" %
2772
                    (result.exit_code, result.output))
2773
      raise errors.OpExecError(" ".join(errmsg))
2774

    
2775
  def Call(self, fn, *args):
2776
    """Call function while all daemons are stopped.
2777

2778
    @type fn: callable
2779
    @param fn: Function to be called
2780

2781
    """
2782
    # Pause watcher by acquiring an exclusive lock on watcher state file
2783
    self.feedback_fn("Blocking watcher")
2784
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2785
    try:
2786
      # TODO: Currently, this just blocks. There's no timeout.
2787
      # TODO: Should it be a shared lock?
2788
      watcher_block.Exclusive(blocking=True)
2789

    
2790
      # Stop master daemons, so that no new jobs can come in and all running
2791
      # ones are finished
2792
      self.feedback_fn("Stopping master daemons")
2793
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2794
      try:
2795
        # Stop daemons on all nodes
2796
        for node_name in self.online_nodes:
2797
          self.feedback_fn("Stopping daemons on %s" % node_name)
2798
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2799

    
2800
        # All daemons are shut down now
2801
        try:
2802
          return fn(self, *args)
2803
        except Exception, err:
2804
          _, errmsg = FormatError(err)
2805
          logging.exception("Caught exception")
2806
          self.feedback_fn(errmsg)
2807
          raise
2808
      finally:
2809
        # Start cluster again, master node last
2810
        for node_name in self.nonmaster_nodes + [self.master_node]:
2811
          self.feedback_fn("Starting daemons on %s" % node_name)
2812
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2813
    finally:
2814
      # Resume watcher
2815
      watcher_block.Close()
2816

    
2817

    
2818
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()
  # Query client
  qcl = GetClient(query=True)

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=qcl)
  ssh_ports = GetNodesSshPorts(online_nodes, qcl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl
  del qcl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes, ssh_ports).Call(fn, *args)


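# Illustrative sketch only (not called anywhere in this module): the callback
# handed to RunWhileClusterStopped receives the helper object as its first
# argument, so it can reuse the helper's node list, feedback function and
# command runner. The command below ("hostname") is just a placeholder.
def _ExampleOfflineTask(helper):
  for node_name in helper.online_nodes:
    helper.feedback_fn("Checking %s while daemons are down" % node_name)
    helper._RunCmd(node_name, ["hostname"]) # pylint: disable=W0212

# A typical call site would then be (sketch):
#   RunWhileClusterStopped(ToStdout, _ExampleOfflineTask)

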
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @type separator: string or None
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


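# Minimal usage sketch for GenerateTable; the field names and data below are
# made up purely for illustration.
def _ExampleGenerateTable():
  headers = {"name": "Node", "dfree": "DFree"}
  fields = ["name", "dfree"]
  data = [["node1.example.com", 102400], ["node2.example.com", 2048]]
  # With separator=None the "smart" layout computes column widths; "dfree"
  # is right-aligned and, being a unit field, rendered human-readable ("h").
  return GenerateTable(headers, fields, None, data,
                       numfields=["dfree"], unitfields=["dfree"])

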
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


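# Hypothetical wrapper showing how list-style client commands usually call
# GenericList; the option attributes (no_headers, separator, units, verbose)
# mirror the generic output options defined elsewhere in this module, and the
# field names are assumptions for illustration only.
def _ExampleListNodes(opts, args):
  return GenericList(constants.QR_NODE, ["name", "pinst_cnt", "dtotal"],
                     args, opts.units, opts.separator, not opts.no_headers,
                     verbose=opts.verbose, namefield="name")

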
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


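# Minimal sketch tying TableColumn and FormatTable together (made-up rows):
def _ExampleFormatTable():
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["disk/0", 1024], ["disk/1", 20480]]
  # separator=None selects the width-computing layout described above
  return FormatTable(rows, columns, True, None)

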
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


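# Illustration of the arithmetic above: "30" -> 30, "2m" -> 120, "4h" -> 14400,
# "1d" -> 86400, "1w" -> 604800. A hypothetical caller turning an "--age"
# style value into a cutoff timestamp could look like this:
def _ExampleCutoffFromTimespec(value):
  return time.time() - ParseTimespec(value)

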
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient(query=True)

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of integers
  """
  return map(lambda t: t[0],
             cl.QueryNodes(names=nodes,
                           fields=["ndp/ssh_port"],
                           use_locking=False))


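# Sketch of the usual pairing of the two helpers above (mirrors what
# RunWhileClusterStopped does): fetch the online node names through a query
# client, then look up their SSH ports and build a name->port mapping.
def _ExampleNodePortMap():
  qcl = GetClient(query=True)
  names = GetOnlineNodes([], cl=qcl)
  ports = GetNodesSshPorts(names, qcl)
  return dict(zip(names, ports))

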
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


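# Sketch of how a command operating on many instances typically drives
# JobExecutor; OpInstanceStartup is a real opcode, but this helper itself is
# illustrative only and not used by any command in this module.
def _ExampleStartupMany(opts, instance_names):
  jex = JobExecutor(opts=opts)
  for name in instance_names:
    op = opcodes.OpInstanceStartup(instance_name=name, force=False)
    jex.QueueJob(name, op)
  results = jex.GetResults()
  if compat.all(success for (success, _) in results):
    return constants.EXIT_SUCCESS
  return constants.EXIT_FAILURE

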
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


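# Tiny illustration of the semantics above, with made-up parameter names:
# own parameters that are set are shown verbatim, inherited ones are marked
# as defaults.
def _ExampleFormatParams():
  own = {"kernel_path": "/vmlinuz"}
  effective = {"kernel_path": "/vmlinuz", "root_path": "/dev/vda1"}
  # -> {"kernel_path": "/vmlinuz", "root_path": "default (/dev/vda1)"}
  return FormatParamsDictInfo(own, effective)

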
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


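# Sketch of how the helpers above are meant to be driven: accumulate the
# option text into a StringIO and return it as one string.
def _ExampleIPolicyCommandString(ipolicy, isgroup):
  buf = StringIO()
  PrintIPolicyCommand(buf, ipolicy, isgroup)
  return buf.getvalue()

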
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


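# Typical guard in a client command (sketch; the operation text and the
# "force" flag are placeholders):
def _ExampleConfirmedOperation(names, force):
  if not (force or ConfirmOperation(names, "instances", "reboot")):
    return constants.EXIT_FAILURE
  # ... submit the actual jobs here ...
  return constants.EXIT_SUCCESS

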
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


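# Sketch of a cluster-level caller; the opts attribute names are assumptions
# matching the corresponding --ipolicy-* options, not a definitive list.
def _ExampleClusterIPolicy(opts):
  return CreateIPolicyFromOpts(
    minmax_ispecs=opts.ipolicy_bounds_specs,
    std_ispecs=opts.ipolicy_std_specs,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    fill_all=True)

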
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
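

# Minimal illustration of the accepted input shapes (made-up data): a list of
# pairs keeps ordering, nested dicts and lists are indented one level deeper.
def _ExamplePrintGenericInfo():
  PrintGenericInfo([
    ("cluster name", "example-cluster"),
    ("enabled hypervisors", ["kvm", "xen-pvm"]),
    ("default parameters", {"memory": "128M", "vcpus": "1"}),
    ])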