lib/cli.py @ aa7a5c90

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
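
# Illustrative note (not part of the original module): these ARGS_* lists act
# as positional-argument specifications, and the min/max bounds come from
# _Argument above, e.g.:
#
#   >>> ARGS_ONE_INSTANCE
#   [<ArgInstance min=1 max=1>]
#   >>> ARGS_MANY_NODES
#   [<ArgNode min=0 max=None>]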


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  the supported tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  the supported tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  the supported tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
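
# Illustrative sketch (not part of the original module): ListTags, AddTags and
# RemoveTags expect opts.tag_type to be injected by the calling gnt-* script,
# typically through GenericMain's override argument; a hypothetical wiring
# could look like:
#
#   return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER})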


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
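
# Illustrative example (not in the original source): with parse_prefixes=True,
# _SplitKeyVal turns a "key=val,..." string into a dict, e.g.
#
#   >>> _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus", True)
#   {'memory': '512', 'auto_balance': False, 'vcpus': None}
#
# (dict ordering may differ); plain keys without "=" or a prefix map to True.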


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
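
# Illustrative example (not in the original source): the "ident:key=val,..."
# form yields the identifier plus the parsed dict, while a "no_" prefix on the
# identifier (with prefix parsing enabled) marks the whole group for removal:
#
#   >>> _SplitIdentKeyVal("--disk", "0:size=10G,mode=rw", True)
#   ('0', {'size': '10G', 'mode': 'rw'})
#   >>> _SplitIdentKeyVal("--net", "no_link0", True)
#   ('link0', False)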


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionaries
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
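
# Illustrative example (not in the original source): "//" separates list
# entries and "/" separates identifier sections inside each entry, as used by
# --ipolicy-bounds-specs:
#
#   >>> check_multilist_ident_key_val(None, "--ipolicy-bounds-specs",
#   ...                               "min:memory-size=128/max:memory-size=4096")
#   [{'min': {'memory-size': '128'}, 'max': {'memory-size': '4096'}}]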


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used here to mark dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
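
# Illustrative sketch (hypothetical option, not defined in this module): the
# custom types registered in CliOption are selected via the type= keyword, and
# optparse then runs the matching check_* converter on the raw value, e.g.
#
#   _EXAMPLE_OPT = cli_option("--example-params", dest="example_params",
#                             type="keyval", default={},
#                             help="Example key=value parameters")
#
# would deliver opts.example_params as a dict produced by check_key_val().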


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

    
1052
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1053
                                default=None,
1054
                                choices=list(constants.HT_MIGRATION_MODES),
1055
                                help="Override default migration mode (choose"
1056
                                " either live or non-live")
1057

    
1058
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1059
                                help="Target node and optional secondary node",
1060
                                metavar="<pnode>[:<snode>]",
1061
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1062

    
1063
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1064
                           action="append", metavar="<node>",
1065
                           help="Use only this node (can be used multiple"
1066
                           " times, if not given defaults to all nodes)",
1067
                           completion_suggest=OPT_COMPL_ONE_NODE)
1068

    
1069
NODEGROUP_OPT_NAME = "--node-group"
1070
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1071
                           dest="nodegroup",
1072
                           help="Node group (name or uuid)",
1073
                           metavar="<nodegroup>",
1074
                           default=None, type="string",
1075
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1076

    
1077
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1078
                             metavar="<node>",
1079
                             completion_suggest=OPT_COMPL_ONE_NODE)
1080

    
1081
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1082
                         action="store_false",
1083
                         help="Don't start the instance after creation")
1084

    
1085
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1086
                         action="store_true", default=False,
1087
                         help="Show command instead of executing it")
1088

    
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

1399
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1400
                                       dest="cluster_domain_secret",
1401
                                       default=None,
1402
                                       help=("Load new new cluster domain"
1403
                                             " secret from file"))
1404

    
1405
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1406
                                           dest="new_cluster_domain_secret",
1407
                                           default=False, action="store_true",
1408
                                           help=("Create a new cluster domain"
1409
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-separated list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# Common options for creating instances. The "add" and "import" commands then
# add their own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
1688
  """Exception class for L{_ParseArgs}.
1689

1690
  """
1691
  def __init__(self, exit_error):
1692
    """Initializes instances of this class.
1693

1694
    @type exit_error: bool
1695
    @param exit_error: Whether to report failure on exit
1696

1697
    """
1698
    Exception.__init__(self)
1699
    self.exit_error = exit_error
1700

    
1701

    
1702
class _ShowVersion(Exception):
1703
  """Exception class for L{_ParseArgs}.
1704

1705
  """
1706

    
1707

    
1708
def _ParseArgs(binary, argv, commands, aliases, env_override):
1709
  """Parser for the command line arguments.
1710

1711
  This function parses the arguments and returns the function which
1712
  must be executed together with its (modified) arguments.
1713

1714
  @param binary: Script name
1715
  @param argv: Command line arguments
1716
  @param commands: Dictionary containing command definitions
1717
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1718
  @param env_override: list of env variables allowed for default args
1719
  @raise _ShowUsage: If usage description should be shown
1720
  @raise _ShowVersion: If version should be shown
1721

1722
  """
1723
  assert not (env_override - set(commands))
1724
  assert not (set(aliases.keys()) & set(commands.keys()))
1725

    
1726
  if len(argv) > 1:
1727
    cmd = argv[1]
1728
  else:
1729
    # No option or command given
1730
    raise _ShowUsage(exit_error=True)
1731

    
1732
  if cmd == "--version":
1733
    raise _ShowVersion()
1734
  elif cmd == "--help":
1735
    raise _ShowUsage(exit_error=False)
1736
  elif not (cmd in commands or cmd in aliases):
1737
    raise _ShowUsage(exit_error=True)
1738

    
1739
  # get command, unalias it, and look it up in commands
1740
  if cmd in aliases:
1741
    if aliases[cmd] not in commands:
1742
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1743
                                   " command '%s'" % (cmd, aliases[cmd]))
1744

    
1745
    cmd = aliases[cmd]
1746

    
1747
  if cmd in env_override:
1748
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1749
    env_args = os.environ.get(args_env_name)
1750
    if env_args:
1751
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1752

    
1753
  func, args_def, parser_opts, usage, description = commands[cmd]
1754
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1755
                        description=description,
1756
                        formatter=TitledHelpFormatter(),
1757
                        usage="%%prog %s %s" % (cmd, usage))
1758
  parser.disable_interspersed_args()
1759
  options, args = parser.parse_args(args=argv[2:])
1760

    
1761
  if not _CheckArguments(cmd, args_def, args):
1762
    return None, None, None
1763

    
1764
  return func, options, args
1765

    
1766

    
1767
def _FormatUsage(binary, commands):
1768
  """Generates a nice description of all commands.
1769

1770
  @param binary: Script name
1771
  @param commands: Dictionary containing command definitions
1772

1773
  """
1774
  # compute the max line length for cmd + usage
1775
  mlen = min(60, max(map(len, commands)))
1776

    
1777
  yield "Usage: %s {command} [options...] [argument...]" % binary
1778
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1779
  yield ""
1780
  yield "Commands:"
1781

    
1782
  # and format a nice command list
1783
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1784
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1785
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1786
    for line in help_lines:
1787
      yield " %-*s   %s" % (mlen, "", line)
1788

    
1789
  yield ""
1790

    
1791

    
1792
def _CheckArguments(cmd, args_def, args):
1793
  """Verifies the arguments using the argument definition.
1794

1795
  Algorithm:
1796

1797
    1. Abort with error if values specified by user but none expected.
1798

1799
    1. For each argument in definition
1800

1801
      1. Keep running count of minimum number of values (min_count)
1802
      1. Keep running count of maximum number of values (max_count)
1803
      1. If it has an unlimited number of values
1804

1805
        1. Abort with error if it's not the last argument in the definition
1806

1807
    1. If last argument has limited number of values
1808

1809
      1. Abort with error if number of values doesn't match or is too large
1810

1811
    1. Abort with error if user didn't pass enough values (min_count)
1812

1813
  """
1814
  if args and not args_def:
1815
    ToStderr("Error: Command %s expects no arguments", cmd)
1816
    return False
1817

    
1818
  min_count = None
1819
  max_count = None
1820
  check_max = None
1821

    
1822
  last_idx = len(args_def) - 1
1823

    
1824
  for idx, arg in enumerate(args_def):
1825
    if min_count is None:
1826
      min_count = arg.min
1827
    elif arg.min is not None:
1828
      min_count += arg.min
1829

    
1830
    if max_count is None:
1831
      max_count = arg.max
1832
    elif arg.max is not None:
1833
      max_count += arg.max
1834

    
1835
    if idx == last_idx:
1836
      check_max = (arg.max is not None)
1837

    
1838
    elif arg.max is None:
1839
      raise errors.ProgrammerError("Only the last argument can have max=None")
1840

    
1841
  if check_max:
1842
    # Command with exact number of arguments
1843
    if (min_count is not None and max_count is not None and
1844
        min_count == max_count and len(args) != min_count):
1845
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1846
      return False
1847

    
1848
    # Command with limited number of arguments
1849
    if max_count is not None and len(args) > max_count:
1850
      ToStderr("Error: Command %s expects only %d argument(s)",
1851
               cmd, max_count)
1852
      return False
1853

    
1854
  # Command with some required arguments
1855
  if min_count is not None and len(args) < min_count:
1856
    ToStderr("Error: Command %s expects at least %d argument(s)",
1857
             cmd, min_count)
1858
    return False
1859

    
1860
  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
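
# Illustrative example (not part of the upstream module; node names are made
# up): SplitNodeOption is used e.g. by GenericInstanceCreate below to split an
# optional "pnode:snode" value.  Note that the two-node form returns a list
# (from str.split) while the single-node form returns a tuple:
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#   # => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#   # => ("node1.example.com", None)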


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
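
# Illustrative example (not part of the upstream module; OS and variant names
# are made up): with variants, the base OS name is expanded to "os+variant"
# names, otherwise it is returned unchanged:
#
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#   # => ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None)
#   # => ["debootstrap"]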


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
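
# Illustrative example (not part of the upstream module; field names are made
# up): a selection starting with "+" extends the default field list instead of
# replacing it:
#
#   ParseFields(None, ["name", "status"])         # => ["name", "status"]
#   ParseFields("+oper_ram", ["name"])            # => ["name", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])    # => ["name", "os"]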
1906

    
1907

    
1908
UsesRPC = rpc.RunWithRPC
1909

    
1910

    
1911
def AskUser(text, choices=None):
1912
  """Ask the user a question.
1913

1914
  @param text: the question to ask
1915

1916
  @param choices: list with elements tuples (input_char, return_value,
1917
      description); if not given, it will default to: [('y', True,
1918
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1919
      note that the '?' char is reserved for help
1920

1921
  @return: one of the return values from the choices list; if input is
1922
      not possible (i.e. not running with a tty), we return the last
1923
      entry from the list
1924

1925
  """
1926
  if choices is None:
1927
    choices = [("y", True, "Perform the operation"),
1928
               ("n", False, "Do not perform the operation")]
1929
  if not choices or not isinstance(choices, list):
1930
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1931
  for entry in choices:
1932
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1933
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1934

    
1935
  answer = choices[-1][1]
1936
  new_text = []
1937
  for line in text.splitlines():
1938
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1939
  text = "\n".join(new_text)
1940
  try:
1941
    f = file("/dev/tty", "a+")
1942
  except IOError:
1943
    return answer
1944
  try:
1945
    chars = [entry[0] for entry in choices]
1946
    chars[-1] = "[%s]" % chars[-1]
1947
    chars.append("?")
1948
    maps = dict([(entry[0], entry[1]) for entry in choices])
1949
    while True:
1950
      f.write(text)
1951
      f.write("\n")
1952
      f.write("/".join(chars))
1953
      f.write(": ")
1954
      line = f.readline(2).strip().lower()
1955
      if line in maps:
1956
        answer = maps[line]
1957
        break
1958
      elif line == "?":
1959
        for entry in choices:
1960
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1961
        f.write("\n")
1962
        continue
1963
  finally:
1964
    f.close()
1965
  return answer
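
# Illustrative usage sketch (not part of the upstream module; "opts.force" is
# just an example flag): interactive confirmation before a destructive
# operation, falling back to the last (default "n") choice when no tty is
# available:
#
#   if not opts.force and not AskUser("Continue with instance removal?"):
#     return 1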
1966

    
1967

    
1968
class JobSubmittedException(Exception):
1969
  """Job was submitted, client should exit.
1970

1971
  This exception has one argument, the ID of the job that was
1972
  submitted. The handler should print this ID.
1973

1974
  This is not an error, just a structured way to exit from clients.
1975

1976
  """
1977

    
1978

    
1979
def SendJob(ops, cl=None):
1980
  """Function to submit an opcode without waiting for the results.
1981

1982
  @type ops: list
1983
  @param ops: list of opcodes
1984
  @type cl: luxi.Client
1985
  @param cl: the luxi client to use for communicating with the master;
1986
             if None, a new client will be created
1987

1988
  """
1989
  if cl is None:
1990
    cl = GetClient()
1991

    
1992
  job_id = cl.SubmitJob(ops)
1993

    
1994
  return job_id
1995

    
1996

    
1997
def GenericPollJob(job_id, cbs, report_cbs):
1998
  """Generic job-polling function.
1999

2000
  @type job_id: number
2001
  @param job_id: Job ID
2002
  @type cbs: Instance of L{JobPollCbBase}
2003
  @param cbs: Data callbacks
2004
  @type report_cbs: Instance of L{JobPollReportCbBase}
2005
  @param report_cbs: Reporting callbacks
2006

2007
  """
2008
  prev_job_info = None
2009
  prev_logmsg_serial = None
2010

    
2011
  status = None
2012

    
2013
  while True:
2014
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2015
                                      prev_logmsg_serial)
2016
    if not result:
2017
      # job not found, go away!
2018
      raise errors.JobLost("Job with id %s lost" % job_id)
2019

    
2020
    if result == constants.JOB_NOTCHANGED:
2021
      report_cbs.ReportNotChanged(job_id, status)
2022

    
2023
      # Wait again
2024
      continue
2025

    
2026
    # Split result, a tuple of (field values, log entries)
2027
    (job_info, log_entries) = result
2028
    (status, ) = job_info
2029

    
2030
    if log_entries:
2031
      for log_entry in log_entries:
2032
        (serial, timestamp, log_type, message) = log_entry
2033
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2034
                                    log_type, message)
2035
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2036

    
2037
    # TODO: Handle canceled and archived jobs
2038
    elif status in (constants.JOB_STATUS_SUCCESS,
2039
                    constants.JOB_STATUS_ERROR,
2040
                    constants.JOB_STATUS_CANCELING,
2041
                    constants.JOB_STATUS_CANCELED):
2042
      break
2043

    
2044
    prev_job_info = job_info
2045

    
2046
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2047
  if not jobs:
2048
    raise errors.JobLost("Job with id %s lost" % job_id)
2049

    
2050
  status, opstatus, result = jobs[0]
2051

    
2052
  if status == constants.JOB_STATUS_SUCCESS:
2053
    return result
2054

    
2055
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2056
    raise errors.OpExecError("Job was canceled")
2057

    
2058
  has_ok = False
2059
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2060
    if status == constants.OP_STATUS_SUCCESS:
2061
      has_ok = True
2062
    elif status == constants.OP_STATUS_ERROR:
2063
      errors.MaybeRaise(msg)
2064

    
2065
      if has_ok:
2066
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2067
                                 (idx, msg))
2068

    
2069
      raise errors.OpExecError(str(msg))
2070

    
2071
  # default failure mode
2072
  raise errors.OpExecError(result)
2073

    
2074

    
2075
class JobPollCbBase:
2076
  """Base class for L{GenericPollJob} callbacks.
2077

2078
  """
2079
  def __init__(self):
2080
    """Initializes this class.
2081

2082
    """
2083

    
2084
  def WaitForJobChangeOnce(self, job_id, fields,
2085
                           prev_job_info, prev_log_serial):
2086
    """Waits for changes on a job.
2087

2088
    """
2089
    raise NotImplementedError()
2090

    
2091
  def QueryJobs(self, job_ids, fields):
2092
    """Returns the selected fields for the selected job IDs.
2093

2094
    @type job_ids: list of numbers
2095
    @param job_ids: Job IDs
2096
    @type fields: list of strings
2097
    @param fields: Fields
2098

2099
    """
2100
    raise NotImplementedError()
2101

    
2102

    
2103
class JobPollReportCbBase:
2104
  """Base class for L{GenericPollJob} reporting callbacks.
2105

2106
  """
2107
  def __init__(self):
2108
    """Initializes this class.
2109

2110
    """
2111

    
2112
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2113
    """Handles a log message.
2114

2115
    """
2116
    raise NotImplementedError()
2117

    
2118
  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.
2120

2121
    @type job_id: number
2122
    @param job_id: Job ID
2123
    @type status: string or None
2124
    @param status: Job status if available
2125

2126
    """
2127
    raise NotImplementedError()
2128

    
2129

    
2130
class _LuxiJobPollCb(JobPollCbBase):
2131
  def __init__(self, cl):
2132
    """Initializes this class.
2133

2134
    """
2135
    JobPollCbBase.__init__(self)
2136
    self.cl = cl
2137

    
2138
  def WaitForJobChangeOnce(self, job_id, fields,
2139
                           prev_job_info, prev_log_serial):
2140
    """Waits for changes on a job.
2141

2142
    """
2143
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2144
                                        prev_job_info, prev_log_serial)
2145

    
2146
  def QueryJobs(self, job_ids, fields):
2147
    """Returns the selected fields for the selected job IDs.
2148

2149
    """
2150
    return self.cl.QueryJobs(job_ids, fields)
2151

    
2152

    
2153
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2154
  def __init__(self, feedback_fn):
2155
    """Initializes this class.
2156

2157
    """
2158
    JobPollReportCbBase.__init__(self)
2159

    
2160
    self.feedback_fn = feedback_fn
2161

    
2162
    assert callable(feedback_fn)
2163

    
2164
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2165
    """Handles a log message.
2166

2167
    """
2168
    self.feedback_fn((timestamp, log_type, log_msg))
2169

    
2170
  def ReportNotChanged(self, job_id, status):
2171
    """Called if a job hasn't changed in a while.
2172

2173
    """
2174
    # Ignore
2175

    
2176

    
2177
class StdioJobPollReportCb(JobPollReportCbBase):
2178
  def __init__(self):
2179
    """Initializes this class.
2180

2181
    """
2182
    JobPollReportCbBase.__init__(self)
2183

    
2184
    self.notified_queued = False
2185
    self.notified_waitlock = False
2186

    
2187
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2188
    """Handles a log message.
2189

2190
    """
2191
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2192
             FormatLogMessage(log_type, log_msg))
2193

    
2194
  def ReportNotChanged(self, job_id, status):
2195
    """Called if a job hasn't changed in a while.
2196

2197
    """
2198
    if status is None:
2199
      return
2200

    
2201
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2202
      ToStderr("Job %s is waiting in queue", job_id)
2203
      self.notified_queued = True
2204

    
2205
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2206
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2207
      self.notified_waitlock = True
2208

    
2209

    
2210
def FormatLogMessage(log_type, log_msg):
2211
  """Formats a job message according to its type.
2212

2213
  """
2214
  if log_type != constants.ELOG_MESSAGE:
2215
    log_msg = str(log_msg)
2216

    
2217
  return utils.SafeEncode(log_msg)
2218

    
2219

    
2220
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2221
  """Function to poll for the result of a job.
2222

2223
  @type job_id: job identifier
2224
  @param job_id: the job to poll for results
2225
  @type cl: luxi.Client
2226
  @param cl: the luxi client to use for communicating with the master;
2227
             if None, a new client will be created
2228

2229
  """
2230
  if cl is None:
2231
    cl = GetClient()
2232

    
2233
  if reporter is None:
2234
    if feedback_fn:
2235
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2236
    else:
2237
      reporter = StdioJobPollReportCb()
2238
  elif feedback_fn:
2239
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2240

    
2241
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2242

    
2243

    
2244
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2245
  """Legacy function to submit an opcode.
2246

2247
  This is just a simple wrapper over the construction of the processor
2248
  instance. It should be extended to better handle feedback and
2249
  interaction functions.
2250

2251
  """
2252
  if cl is None:
2253
    cl = GetClient()
2254

    
2255
  SetGenericOpcodeOpts([op], opts)
2256

    
2257
  job_id = SendJob([op], cl=cl)
2258

    
2259
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2260
                       reporter=reporter)
2261

    
2262
  return op_results[0]
2263

    
2264

    
2265
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2266
  """Wrapper around SubmitOpCode or SendJob.
2267

2268
  This function will decide, based on the 'opts' parameter, whether to
2269
  submit and wait for the result of the opcode (and return it), or
2270
  whether to just send the job and print its identifier. It is used in
2271
  order to simplify the implementation of the '--submit' option.
2272

2273
  It will also process the opcodes if we're sending them via SendJob
2274
  (otherwise SubmitOpCode does it).
2275

2276
  """
2277
  if opts and opts.submit_only:
2278
    job = [op]
2279
    SetGenericOpcodeOpts(job, opts)
2280
    job_id = SendJob(job, cl=cl)
2281
    raise JobSubmittedException(job_id)
2282
  else:
2283
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
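
# Illustrative usage sketch (not part of the upstream module; the opcode shown
# is just an example): a typical gnt-* command handler builds an opcode and
# lets SubmitOrSend honour the "--submit" option:
#
#   op = opcodes.OpInstanceStartup(instance_name=args[0])
#   SubmitOrSend(op, opts)
#   return 0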
2284

    
2285

    
2286
def _InitReasonTrail(op, opts):
2287
  """Builds the first part of the reason trail
2288

2289
  Builds the initial part of the reason trail, adding the user provided reason
2290
  (if it exists) and the name of the command starting the operation.
2291

2292
  @param op: the opcode the reason trail will be added to
2293
  @param opts: the command line options selected by the user
2294

2295
  """
2296
  assert len(sys.argv) >= 2
2297
  trail = []
2298

    
2299
  if opts.reason:
2300
    trail.append((constants.OPCODE_REASON_SRC_USER,
2301
                  opts.reason,
2302
                  utils.EpochNano()))
2303

    
2304
  binary = os.path.basename(sys.argv[0])
2305
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2306
  command = sys.argv[1]
2307
  trail.append((source, command, utils.EpochNano()))
2308
  op.reason = trail
2309

    
2310

    
2311
def SetGenericOpcodeOpts(opcode_list, options):
2312
  """Processor for generic options.
2313

2314
  This function updates the given opcodes based on generic command
2315
  line options (like debug, dry-run, etc.).
2316

2317
  @param opcode_list: list of opcodes
2318
  @param options: command line options or None
2319
  @return: None (in-place modification)
2320

2321
  """
2322
  if not options:
2323
    return
2324
  for op in opcode_list:
2325
    op.debug_level = options.debug
2326
    if hasattr(options, "dry_run"):
2327
      op.dry_run = options.dry_run
2328
    if getattr(options, "priority", None) is not None:
2329
      op.priority = options.priority
2330
    _InitReasonTrail(op, options)
2331

    
2332

    
2333
def GetClient(query=False):
  """Connects to a luxi socket and returns a client.
2335

2336
  @type query: boolean
2337
  @param query: this signifies that the client will only be
2338
      used for queries; if the build-time parameter
2339
      enable-split-queries is enabled, then the client will be
2340
      connected to the query socket instead of the masterd socket
2341

2342
  """
2343
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2344
  if override_socket:
2345
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2346
      address = pathutils.MASTER_SOCKET
2347
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2348
      address = pathutils.QUERY_SOCKET
2349
    else:
2350
      address = override_socket
2351
  elif query and constants.ENABLE_SPLIT_QUERY:
2352
    address = pathutils.QUERY_SOCKET
2353
  else:
2354
    address = None
2355
  # TODO: Cache object?
2356
  try:
2357
    client = luxi.Client(address=address)
2358
  except luxi.NoMasterError:
2359
    ss = ssconf.SimpleStore()
2360

    
2361
    # Try to read ssconf file
2362
    try:
2363
      ss.GetMasterNode()
2364
    except errors.ConfigurationError:
2365
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2366
                                 " not part of a cluster",
2367
                                 errors.ECODE_INVAL)
2368

    
2369
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2370
    if master != myself:
2371
      raise errors.OpPrereqError("This is not the master node, please connect"
2372
                                 " to node '%s' and rerun the command" %
2373
                                 master, errors.ECODE_INVAL)
2374
    raise
2375
  return client
2376

    
2377

    
2378
def FormatError(err):
2379
  """Return a formatted error message for a given error.
2380

2381
  This function takes an exception instance and returns a tuple
2382
  consisting of two values: first, the recommended exit code, and
2383
  second, a string describing the error message (not
2384
  newline-terminated).
2385

2386
  """
2387
  retcode = 1
2388
  obuf = StringIO()
2389
  msg = str(err)
2390
  if isinstance(err, errors.ConfigurationError):
2391
    txt = "Corrupt configuration file: %s" % msg
2392
    logging.error(txt)
2393
    obuf.write(txt + "\n")
2394
    obuf.write("Aborting.")
2395
    retcode = 2
2396
  elif isinstance(err, errors.HooksAbort):
2397
    obuf.write("Failure: hooks execution failed:\n")
2398
    for node, script, out in err.args[0]:
2399
      if out:
2400
        obuf.write("  node: %s, script: %s, output: %s\n" %
2401
                   (node, script, out))
2402
      else:
2403
        obuf.write("  node: %s, script: %s (no output)\n" %
2404
                   (node, script))
2405
  elif isinstance(err, errors.HooksFailure):
2406
    obuf.write("Failure: hooks general failure: %s" % msg)
2407
  elif isinstance(err, errors.ResolverError):
2408
    this_host = netutils.Hostname.GetSysName()
2409
    if err.args[0] == this_host:
2410
      msg = "Failure: can't resolve my own hostname ('%s')"
2411
    else:
2412
      msg = "Failure: can't resolve hostname '%s'"
2413
    obuf.write(msg % err.args[0])
2414
  elif isinstance(err, errors.OpPrereqError):
2415
    if len(err.args) == 2:
2416
      obuf.write("Failure: prerequisites not met for this"
2417
                 " operation:\nerror type: %s, error details:\n%s" %
2418
                 (err.args[1], err.args[0]))
2419
    else:
2420
      obuf.write("Failure: prerequisites not met for this"
2421
                 " operation:\n%s" % msg)
2422
  elif isinstance(err, errors.OpExecError):
2423
    obuf.write("Failure: command execution error:\n%s" % msg)
2424
  elif isinstance(err, errors.TagError):
2425
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2426
  elif isinstance(err, errors.JobQueueDrainError):
2427
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2428
               " accept new requests\n")
2429
  elif isinstance(err, errors.JobQueueFull):
2430
    obuf.write("Failure: the job queue is full and doesn't accept new"
2431
               " job submissions until old jobs are archived\n")
2432
  elif isinstance(err, errors.TypeEnforcementError):
2433
    obuf.write("Parameter Error: %s" % msg)
2434
  elif isinstance(err, errors.ParameterError):
2435
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2436
  elif isinstance(err, luxi.NoMasterError):
2437
    if err.args[0] == pathutils.MASTER_SOCKET:
2438
      daemon = "the master daemon"
2439
    elif err.args[0] == pathutils.QUERY_SOCKET:
2440
      daemon = "the config daemon"
2441
    else:
2442
      daemon = "socket '%s'" % str(err.args[0])
2443
    obuf.write("Cannot communicate with %s.\nIs the process running"
2444
               " and listening for connections?" % daemon)
2445
  elif isinstance(err, luxi.TimeoutError):
2446
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2447
               " been submitted and will continue to run even if the call"
2448
               " timed out. Useful commands in this situation are \"gnt-job"
2449
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2450
    obuf.write(msg)
2451
  elif isinstance(err, luxi.PermissionError):
2452
    obuf.write("It seems you don't have permissions to connect to the"
2453
               " master daemon.\nPlease retry as a different user.")
2454
  elif isinstance(err, luxi.ProtocolError):
2455
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2456
               "%s" % msg)
2457
  elif isinstance(err, errors.JobLost):
2458
    obuf.write("Error checking job status: %s" % msg)
2459
  elif isinstance(err, errors.QueryFilterParseError):
2460
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2461
    obuf.write("\n".join(err.GetDetails()))
2462
  elif isinstance(err, errors.GenericError):
2463
    obuf.write("Unhandled Ganeti error: %s" % msg)
2464
  elif isinstance(err, JobSubmittedException):
2465
    obuf.write("JobID: %s\n" % err.args[0])
2466
    retcode = 0
2467
  else:
2468
    obuf.write("Unhandled exception: %s" % msg)
2469
  return retcode, obuf.getvalue().rstrip("\n")
2470

    
2471

    
2472
def GenericMain(commands, override=None, aliases=None,
2473
                env_override=frozenset()):
2474
  """Generic main function for all the gnt-* commands.
2475

2476
  @param commands: a dictionary with a special structure, see the design doc
2477
                   for command line handling.
2478
  @param override: if not None, we expect a dictionary with keys that will
2479
                   override command line options; this can be used to pass
2480
                   options from the scripts to generic functions
2481
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2482
  @param env_override: list of environment names which are allowed to submit
2483
                       default args for commands
2484

2485
  """
2486
  # save the program name and the entire command line for later logging
2487
  if sys.argv:
2488
    binary = os.path.basename(sys.argv[0])
2489
    if not binary:
2490
      binary = sys.argv[0]
2491

    
2492
    if len(sys.argv) >= 2:
2493
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2494
    else:
2495
      logname = binary
2496

    
2497
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2498
  else:
2499
    binary = "<unknown program>"
2500
    cmdline = "<unknown>"
2501

    
2502
  if aliases is None:
2503
    aliases = {}
2504

    
2505
  try:
2506
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2507
                                       env_override)
2508
  except _ShowVersion:
2509
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2510
             constants.RELEASE_VERSION)
2511
    return constants.EXIT_SUCCESS
2512
  except _ShowUsage, err:
2513
    for line in _FormatUsage(binary, commands):
2514
      ToStdout(line)
2515

    
2516
    if err.exit_error:
2517
      return constants.EXIT_FAILURE
2518
    else:
2519
      return constants.EXIT_SUCCESS
2520
  except errors.ParameterError, err:
2521
    result, err_msg = FormatError(err)
2522
    ToStderr(err_msg)
2523
    return 1
2524

    
2525
  if func is None: # parse error
2526
    return 1
2527

    
2528
  if override is not None:
2529
    for key, val in override.iteritems():
2530
      setattr(options, key, val)
2531

    
2532
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2533
                     stderr_logging=True)
2534

    
2535
  logging.info("Command line: %s", cmdline)
2536

    
2537
  try:
2538
    result = func(options, args)
2539
  except (errors.GenericError, luxi.ProtocolError,
2540
          JobSubmittedException), err:
2541
    result, err_msg = FormatError(err)
2542
    logging.exception("Error during command processing")
2543
    ToStderr(err_msg)
2544
  except KeyboardInterrupt:
2545
    result = constants.EXIT_FAILURE
2546
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2547
             " might have been submitted and"
2548
             " will continue to run in the background.")
2549
  except IOError, err:
2550
    if err.errno == errno.EPIPE:
2551
      # our terminal went away, we'll exit
2552
      sys.exit(constants.EXIT_FAILURE)
2553
    else:
2554
      raise
2555

    
2556
  return result
2557

    
2558

    
2559
def ParseNicOption(optvalue):
2560
  """Parses the value of the --net option(s).
2561

2562
  """
2563
  try:
2564
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2565
  except (TypeError, ValueError), err:
2566
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2567
                               errors.ECODE_INVAL)
2568

    
2569
  nics = [{}] * nic_max
2570
  for nidx, ndict in optvalue:
2571
    nidx = int(nidx)
2572

    
2573
    if not isinstance(ndict, dict):
2574
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2575
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2576

    
2577
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2578

    
2579
    nics[nidx] = ndict
2580

    
2581
  return nics
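
# Illustrative example (not part of the upstream module; MAC and link values
# are made up): the option parser yields (index, dict) pairs, indices may be
# sparse, and missing indices are filled with empty dicts:
#
#   ParseNicOption([("0", {"link": "br0"}),
#                   ("2", {"mac": "aa:00:00:35:bc:8f"})])
#   # => [{"link": "br0"}, {}, {"mac": "aa:00:00:35:bc:8f"}]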
2582

    
2583

    
2584
def GenericInstanceCreate(mode, opts, args):
2585
  """Add an instance to the cluster via either creation or import.
2586

2587
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2588
  @param opts: the command line options selected by the user
2589
  @type args: list
2590
  @param args: should contain only one element, the new instance name
2591
  @rtype: int
2592
  @return: the desired exit code
2593

2594
  """
2595
  instance = args[0]
2596

    
2597
  (pnode, snode) = SplitNodeOption(opts.node)
2598

    
2599
  hypervisor = None
2600
  hvparams = {}
2601
  if opts.hypervisor:
2602
    hypervisor, hvparams = opts.hypervisor
2603

    
2604
  if opts.nics:
2605
    nics = ParseNicOption(opts.nics)
2606
  elif opts.no_nics:
2607
    # no nics
2608
    nics = []
2609
  elif mode == constants.INSTANCE_CREATE:
2610
    # default of one nic, all auto
2611
    nics = [{}]
2612
  else:
2613
    # mode == import
2614
    nics = []
2615

    
2616
  if opts.disk_template == constants.DT_DISKLESS:
2617
    if opts.disks or opts.sd_size is not None:
2618
      raise errors.OpPrereqError("Diskless instance but disk"
2619
                                 " information passed", errors.ECODE_INVAL)
2620
    disks = []
2621
  else:
2622
    if (not opts.disks and not opts.sd_size
2623
        and mode == constants.INSTANCE_CREATE):
2624
      raise errors.OpPrereqError("No disk information specified",
2625
                                 errors.ECODE_INVAL)
2626
    if opts.disks and opts.sd_size is not None:
2627
      raise errors.OpPrereqError("Please use either the '--disk' or"
2628
                                 " '-s' option", errors.ECODE_INVAL)
2629
    if opts.sd_size is not None:
2630
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2631

    
2632
    if opts.disks:
2633
      try:
2634
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2635
      except ValueError, err:
2636
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2637
                                   errors.ECODE_INVAL)
2638
      disks = [{}] * disk_max
2639
    else:
2640
      disks = []
2641
    for didx, ddict in opts.disks:
2642
      didx = int(didx)
2643
      if not isinstance(ddict, dict):
2644
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2645
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2646
      elif constants.IDISK_SIZE in ddict:
2647
        if constants.IDISK_ADOPT in ddict:
2648
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2649
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2650
        try:
2651
          ddict[constants.IDISK_SIZE] = \
2652
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2653
        except ValueError, err:
2654
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2655
                                     (didx, err), errors.ECODE_INVAL)
2656
      elif constants.IDISK_ADOPT in ddict:
2657
        if mode == constants.INSTANCE_IMPORT:
2658
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2659
                                     " import", errors.ECODE_INVAL)
2660
        ddict[constants.IDISK_SIZE] = 0
2661
      else:
2662
        raise errors.OpPrereqError("Missing size or adoption source for"
2663
                                   " disk %d" % didx, errors.ECODE_INVAL)
2664
      disks[didx] = ddict
2665

    
2666
  if opts.tags is not None:
2667
    tags = opts.tags.split(",")
2668
  else:
2669
    tags = []
2670

    
2671
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2672
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2673

    
2674
  if mode == constants.INSTANCE_CREATE:
2675
    start = opts.start
2676
    os_type = opts.os
2677
    force_variant = opts.force_variant
2678
    src_node = None
2679
    src_path = None
2680
    no_install = opts.no_install
2681
    identify_defaults = False
2682
  elif mode == constants.INSTANCE_IMPORT:
2683
    start = False
2684
    os_type = None
2685
    force_variant = False
2686
    src_node = opts.src_node
2687
    src_path = opts.src_dir
2688
    no_install = None
2689
    identify_defaults = opts.identify_defaults
2690
  else:
2691
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2692

    
2693
  op = opcodes.OpInstanceCreate(instance_name=instance,
2694
                                disks=disks,
2695
                                disk_template=opts.disk_template,
2696
                                nics=nics,
2697
                                conflicts_check=opts.conflicts_check,
2698
                                pnode=pnode, snode=snode,
2699
                                ip_check=opts.ip_check,
2700
                                name_check=opts.name_check,
2701
                                wait_for_sync=opts.wait_for_sync,
2702
                                file_storage_dir=opts.file_storage_dir,
2703
                                file_driver=opts.file_driver,
2704
                                iallocator=opts.iallocator,
2705
                                hypervisor=hypervisor,
2706
                                hvparams=hvparams,
2707
                                beparams=opts.beparams,
2708
                                osparams=opts.osparams,
2709
                                mode=mode,
2710
                                start=start,
2711
                                os_type=os_type,
2712
                                force_variant=force_variant,
2713
                                src_node=src_node,
2714
                                src_path=src_path,
2715
                                tags=tags,
2716
                                no_install=no_install,
2717
                                identify_defaults=identify_defaults,
2718
                                ignore_ipolicy=opts.ignore_ipolicy)
2719

    
2720
  SubmitOrSend(op, opts)
2721
  return 0
2722

    
2723

    
2724
class _RunWhileClusterStoppedHelper:
2725
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2726

2727
  """
2728
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2729
    """Initializes this class.
2730

2731
    @type feedback_fn: callable
2732
    @param feedback_fn: Feedback function
2733
    @type cluster_name: string
2734
    @param cluster_name: Cluster name
2735
    @type master_node: string
2736
    @param master_node: Master node name
2737
    @type online_nodes: list
2738
    @param online_nodes: List of names of online nodes
2739

2740
    """
2741
    self.feedback_fn = feedback_fn
2742
    self.cluster_name = cluster_name
2743
    self.master_node = master_node
2744
    self.online_nodes = online_nodes
2745

    
2746
    self.ssh = ssh.SshRunner(self.cluster_name)
2747

    
2748
    self.nonmaster_nodes = [name for name in online_nodes
2749
                            if name != master_node]
2750

    
2751
    assert self.master_node not in self.nonmaster_nodes
2752

    
2753
  def _RunCmd(self, node_name, cmd):
2754
    """Runs a command on the local or a remote machine.
2755

2756
    @type node_name: string
2757
    @param node_name: Machine name
2758
    @type cmd: list
2759
    @param cmd: Command
2760

2761
    """
2762
    if node_name is None or node_name == self.master_node:
2763
      # No need to use SSH
2764
      result = utils.RunCmd(cmd)
2765
    else:
2766
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2767
                            utils.ShellQuoteArgs(cmd))
2768

    
2769
    if result.failed:
2770
      errmsg = ["Failed to run command %s" % result.cmd]
2771
      if node_name:
2772
        errmsg.append("on node %s" % node_name)
2773
      errmsg.append(": exitcode %s and error %s" %
2774
                    (result.exit_code, result.output))
2775
      raise errors.OpExecError(" ".join(errmsg))
2776

    
2777
  def Call(self, fn, *args):
2778
    """Call function while all daemons are stopped.
2779

2780
    @type fn: callable
2781
    @param fn: Function to be called
2782

2783
    """
2784
    # Pause watcher by acquiring an exclusive lock on watcher state file
2785
    self.feedback_fn("Blocking watcher")
2786
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2787
    try:
2788
      # TODO: Currently, this just blocks. There's no timeout.
2789
      # TODO: Should it be a shared lock?
2790
      watcher_block.Exclusive(blocking=True)
2791

    
2792
      # Stop master daemons, so that no new jobs can come in and all running
2793
      # ones are finished
2794
      self.feedback_fn("Stopping master daemons")
2795
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2796
      try:
2797
        # Stop daemons on all nodes
2798
        for node_name in self.online_nodes:
2799
          self.feedback_fn("Stopping daemons on %s" % node_name)
2800
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2801

    
2802
        # All daemons are shut down now
2803
        try:
2804
          return fn(self, *args)
2805
        except Exception, err:
2806
          _, errmsg = FormatError(err)
2807
          logging.exception("Caught exception")
2808
          self.feedback_fn(errmsg)
2809
          raise
2810
      finally:
2811
        # Start cluster again, master node last
2812
        for node_name in self.nonmaster_nodes + [self.master_node]:
2813
          self.feedback_fn("Starting daemons on %s" % node_name)
2814
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2815
    finally:
2816
      # Resume watcher
2817
      watcher_block.Close()
2818

    
2819

    
2820
def RunWhileClusterStopped(feedback_fn, fn, *args):
2821
  """Calls a function while all cluster daemons are stopped.
2822

2823
  @type feedback_fn: callable
2824
  @param feedback_fn: Feedback function
2825
  @type fn: callable
2826
  @param fn: Function to be called when daemons are stopped
2827

2828
  """
2829
  feedback_fn("Gathering cluster information")
2830

    
2831
  # This ensures we're running on the master daemon
2832
  cl = GetClient()
2833

    
2834
  (cluster_name, master_node) = \
2835
    cl.QueryConfigValues(["cluster_name", "master_node"])
2836

    
2837
  online_nodes = GetOnlineNodes([], cl=cl)
2838

    
2839
  # Don't keep a reference to the client. The master daemon will go away.
2840
  del cl
2841

    
2842
  assert master_node in online_nodes
2843

    
2844
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2845
                                       online_nodes).Call(fn, *args)
2846

    
2847

    
2848
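# Usage sketch for RunWhileClusterStopped (illustrative only, not called from
# this module): the callback receives the helper instance as its first
# argument and can use its _RunCmd method to execute commands locally or over
# SSH while all daemons are down.  The command and flag path below are made up.
#
#   def _TouchFlag(helper, path):
#     for node in helper.online_nodes:
#       helper._RunCmd(node, ["touch", path])
#
#   RunWhileClusterStopped(ToStdout, _TouchFlag, "/tmp/cluster-was-stopped")
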
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result

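# Usage sketch for GenerateTable (illustrative only; the field names and node
# names below are made up).  With no separator the column widths are computed
# automatically and unit fields are run through utils.FormatUnit:
#
#   lines = GenerateTable({"name": "Node", "dtotal": "DTotal"},
#                         ["name", "dtotal"], None,
#                         [["node1.example.com", 10240],
#                          ["node2.example.com", 204800]],
#                         numfields=["dtotal"], unitfields=["dtotal"])
#   for line in lines:
#     ToStdout(line)
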
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)

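# Usage sketch for FormatQueryResult (illustrative only): print the rows of a
# luxi query and map the returned status to an exit code; "cl" is assumed to
# be a client obtained via GetClient():
#
#   response = cl.Query(constants.QR_NODE, ["name", "dtotal"], None)
#   (status, lines) = FormatQueryResult(response, unit="h", header=True)
#   for line in lines:
#     ToStdout(line)
#   if status != QR_NORMAL:
#     ToStderr("Query returned incomplete or unknown data")
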
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS

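# Usage sketch for GenericList (illustrative only): roughly the shape of the
# call made by the various "gnt-* list" commands, assuming "opts" and "args"
# come from a cli.py command function and that the usual option destinations
# (units, separator, no_headers, verbose, force_filter) are present:
#
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
#                      opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose, force_filter=opts.force_filter,
#                      namefield="name")
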
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]

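# Usage sketch for TableColumn/FormatTable (illustrative only; the values are
# made up): column widths are computed from the formatted values when no
# separator is given:
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Free", lambda v: utils.FormatUnit(v, "h"), True),
#     ]
#   for line in FormatTable([["node1", 10240], ["node2", 204800]],
#                           columns, True, None):
#     ToStdout(line)
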
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value

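# Examples for ParseTimespec (illustrative only):
#
#   ParseTimespec("30")   # -> 30 (plain numbers are taken as seconds)
#   ParseTimespec("30m")  # -> 1800
#   ParseTimespec("2d")   # -> 172800
#   ParseTimespec("d")    # raises errors.OpPrereqError (suffix only)
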
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)

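# Usage sketch for GetOnlineNodes (illustrative only): collect the secondary
# IPs of all online nodes except the master, e.g. for copying files over the
# replication network:
#
#   node_ips = GetOnlineNodes([], secondary_ips=True, filter_master=True)
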
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]

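# Usage sketch for JobExecutor (illustrative only): queue one job per instance
# and wait for all of them, assuming "opts" comes from a cli.py command
# function and "MakeSomeOpCode" stands in for any opcode constructor:
#
#   jex = JobExecutor(cl=GetClient(), opts=opts)
#   for instance in ["inst1.example.com", "inst2.example.com"]:
#     jex.QueueJob(instance, MakeSomeOpCode(instance_name=instance))
#   results = jex.GetResults()
#   bad = len([1 for (success, _) in results if not success])
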
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice

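# Usage sketch for ConfirmOperation (illustrative only): ask before acting on
# several instances, unless a --force-style option was given; "opts.force"
# and the instance names are assumptions:
#
#   names = ["inst1.example.com", "inst2.example.com"]
#   if not (opts.force or
#           ConfirmOperation(names, "instances", "reboot")):
#     return constants.EXIT_FAILURE
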
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out

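# Usage sketch for CreateIPolicyFromOpts (illustrative only): build a partial
# group-level policy carrying just a vcpu ratio and the allowed disk
# templates; bounds and std specs are left untouched.  The disk template
# constants are assumed to be the usual ones from the constants module:
#
#   ipolicy = CreateIPolicyFromOpts(
#     ipolicy_disk_templates=[constants.DT_PLAIN, constants.DT_DRBD8],
#     ipolicy_vcpu_ratio=4.0, group_ipolicy=True)
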
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
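

# Usage sketch for PrintGenericInfo (illustrative only; the values are made
# up): a mixture of dicts, ordered key/value lists and plain lists is
# rendered as indented YAML-style output:
#
#   PrintGenericInfo([
#     ("Cluster name", "cluster.example.com"),
#     ("Enabled hypervisors", ["kvm", "xen-pvm"]),
#     ("OS parameters", {"variant": "default"}),
#     ])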