Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 53919782

History | View | Annotate | Download (82.1 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42

    
43
from optparse import (OptionParser, TitledHelpFormatter,
44
                      Option, OptionValueError)
45

    
46

    
47
__all__ = [
48
  # Command line options
49
  "ADD_UIDS_OPT",
50
  "ALLOCATABLE_OPT",
51
  "ALL_OPT",
52
  "AUTO_PROMOTE_OPT",
53
  "AUTO_REPLACE_OPT",
54
  "BACKEND_OPT",
55
  "BLK_OS_OPT",
56
  "CAPAB_MASTER_OPT",
57
  "CAPAB_VM_OPT",
58
  "CLEANUP_OPT",
59
  "CLUSTER_DOMAIN_SECRET_OPT",
60
  "CONFIRM_OPT",
61
  "CP_SIZE_OPT",
62
  "DEBUG_OPT",
63
  "DEBUG_SIMERR_OPT",
64
  "DISKIDX_OPT",
65
  "DISK_OPT",
66
  "DISK_TEMPLATE_OPT",
67
  "DRAINED_OPT",
68
  "DRY_RUN_OPT",
69
  "DRBD_HELPER_OPT",
70
  "EARLY_RELEASE_OPT",
71
  "ENABLED_HV_OPT",
72
  "ERROR_CODES_OPT",
73
  "FIELDS_OPT",
74
  "FILESTORE_DIR_OPT",
75
  "FILESTORE_DRIVER_OPT",
76
  "FORCE_OPT",
77
  "FORCE_VARIANT_OPT",
78
  "GLOBAL_FILEDIR_OPT",
79
  "HID_OS_OPT",
80
  "HVLIST_OPT",
81
  "HVOPTS_OPT",
82
  "HYPERVISOR_OPT",
83
  "IALLOCATOR_OPT",
84
  "DEFAULT_IALLOCATOR_OPT",
85
  "IDENTIFY_DEFAULTS_OPT",
86
  "IGNORE_CONSIST_OPT",
87
  "IGNORE_FAILURES_OPT",
88
  "IGNORE_OFFLINE_OPT",
89
  "IGNORE_REMOVE_FAILURES_OPT",
90
  "IGNORE_SECONDARIES_OPT",
91
  "IGNORE_SIZE_OPT",
92
  "INTERVAL_OPT",
93
  "MAC_PREFIX_OPT",
94
  "MAINTAIN_NODE_HEALTH_OPT",
95
  "MASTER_NETDEV_OPT",
96
  "MC_OPT",
97
  "MIGRATION_MODE_OPT",
98
  "NET_OPT",
99
  "NEW_CLUSTER_CERT_OPT",
100
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
101
  "NEW_CONFD_HMAC_KEY_OPT",
102
  "NEW_RAPI_CERT_OPT",
103
  "NEW_SECONDARY_OPT",
104
  "NIC_PARAMS_OPT",
105
  "NODE_LIST_OPT",
106
  "NODE_PLACEMENT_OPT",
107
  "NODEGROUP_OPT",
108
  "NODRBD_STORAGE_OPT",
109
  "NOHDR_OPT",
110
  "NOIPCHECK_OPT",
111
  "NO_INSTALL_OPT",
112
  "NONAMECHECK_OPT",
113
  "NOLVM_STORAGE_OPT",
114
  "NOMODIFY_ETCHOSTS_OPT",
115
  "NOMODIFY_SSH_SETUP_OPT",
116
  "NONICS_OPT",
117
  "NONLIVE_OPT",
118
  "NONPLUS1_OPT",
119
  "NOSHUTDOWN_OPT",
120
  "NOSTART_OPT",
121
  "NOSSH_KEYCHECK_OPT",
122
  "NOVOTING_OPT",
123
  "NWSYNC_OPT",
124
  "ON_PRIMARY_OPT",
125
  "ON_SECONDARY_OPT",
126
  "OFFLINE_OPT",
127
  "OSPARAMS_OPT",
128
  "OS_OPT",
129
  "OS_SIZE_OPT",
130
  "PREALLOC_WIPE_DISKS_OPT",
131
  "PRIMARY_IP_VERSION_OPT",
132
  "PRIORITY_OPT",
133
  "RAPI_CERT_OPT",
134
  "READD_OPT",
135
  "REBOOT_TYPE_OPT",
136
  "REMOVE_INSTANCE_OPT",
137
  "REMOVE_UIDS_OPT",
138
  "RESERVED_LVS_OPT",
139
  "ROMAN_OPT",
140
  "SECONDARY_IP_OPT",
141
  "SELECT_OS_OPT",
142
  "SEP_OPT",
143
  "SHOWCMD_OPT",
144
  "SHUTDOWN_TIMEOUT_OPT",
145
  "SINGLE_NODE_OPT",
146
  "SRC_DIR_OPT",
147
  "SRC_NODE_OPT",
148
  "SUBMIT_OPT",
149
  "STATIC_OPT",
150
  "SYNC_OPT",
151
  "TAG_SRC_OPT",
152
  "TIMEOUT_OPT",
153
  "UIDPOOL_OPT",
154
  "USEUNITS_OPT",
155
  "USE_REPL_NET_OPT",
156
  "VERBOSE_OPT",
157
  "VG_NAME_OPT",
158
  "YES_DOIT_OPT",
159
  # Generic functions for CLI programs
160
  "GenericMain",
161
  "GenericInstanceCreate",
162
  "GetClient",
163
  "GetOnlineNodes",
164
  "JobExecutor",
165
  "JobSubmittedException",
166
  "ParseTimespec",
167
  "RunWhileClusterStopped",
168
  "SubmitOpCode",
169
  "SubmitOrSend",
170
  "UsesRPC",
171
  # Formatting functions
172
  "ToStderr", "ToStdout",
173
  "FormatError",
174
  "GenerateTable",
175
  "AskUser",
176
  "FormatTimestamp",
177
  "FormatLogMessage",
178
  # Tags functions
179
  "ListTags",
180
  "AddTags",
181
  "RemoveTags",
182
  # command line options support infrastructure
183
  "ARGS_MANY_INSTANCES",
184
  "ARGS_MANY_NODES",
185
  "ARGS_NONE",
186
  "ARGS_ONE_INSTANCE",
187
  "ARGS_ONE_NODE",
188
  "ARGS_ONE_OS",
189
  "ArgChoice",
190
  "ArgCommand",
191
  "ArgFile",
192
  "ArgHost",
193
  "ArgInstance",
194
  "ArgJobId",
195
  "ArgNode",
196
  "ArgOs",
197
  "ArgSuggest",
198
  "ArgUnknown",
199
  "OPT_COMPL_INST_ADD_NODES",
200
  "OPT_COMPL_MANY_NODES",
201
  "OPT_COMPL_ONE_IALLOCATOR",
202
  "OPT_COMPL_ONE_INSTANCE",
203
  "OPT_COMPL_ONE_NODE",
204
  "OPT_COMPL_ONE_NODEGROUP",
205
  "OPT_COMPL_ONE_OS",
206
  "cli_option",
207
  "SplitNodeOption",
208
  "CalculateOSNames",
209
  "ParseFields",
210
  ]
211

    
212
#: Prefix that marks a value-less "no_<key>" entry (parsed as key=False)
NO_PREFIX = "no_"
#: Prefix that marks a value-less "-<key>" entry (parsed as key=None)
UN_PREFIX = "-"
214

    
215
#: Priorities (sorted from lowest to highest)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority name-to-value dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
226

    
227

    
228
class _Argument:
229
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
230
    self.min = min
231
    self.max = max
232

    
233
  def __repr__(self):
234
    return ("<%s min=%s max=%s>" %
235
            (self.__class__.__name__, self.min, self.max))
236

    
237

    
238
class ArgSuggest(_Argument):
  """Argument with a list of suggested values.

  The value can be any of the choices passed to the constructor; the
  choices are suggestions only and are not enforced.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return "<%s min=%s max=%s choices=%r>" % (self.__class__.__name__,
                                              self.min, self.max,
                                              self.choices)
252

    
253

    
254
class ArgChoice(ArgSuggest):
  """Choice argument.

  Like L{ArgSuggest}, the value can be any of the ones passed to the
  constructor, but here the value must be one of the choices.

  """
261

    
262

    
263
class ArgUnknown(_Argument):
  """Argument of a kind unknown to the program (e.g. determined at runtime).

  """
267

    
268

    
269
class ArgInstance(_Argument):
  """Argument holding instance name(s).

  """
273

    
274

    
275
class ArgNode(_Argument):
  """Argument holding node name(s).

  """
279

    
280
class ArgJobId(_Argument):
  """Argument holding job ID(s).

  """
284

    
285

    
286
class ArgFile(_Argument):
  """Argument holding a file path.

  """
290

    
291

    
292
class ArgCommand(_Argument):
  """Argument holding a command to run.

  """
296

    
297

    
298
class ArgHost(_Argument):
  """Argument holding a host name.

  """
302

    
303

    
304
class ArgOs(_Argument):
  """Argument holding an OS name.

  """
308

    
309

    
310
#: Commonly used positional-argument specifications
ARGS_NONE = []                                   # no arguments allowed
ARGS_MANY_INSTANCES = [ArgInstance()]            # zero or more instances
ARGS_MANY_NODES = [ArgNode()]                    # zero or more nodes
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]  # exactly one instance
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]          # exactly one node
ARGS_ONE_OS = [ArgOs(min=1, max=1)]              # exactly one OS name
316

    
317

    
318
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter (it pops the
  object name for node/instance tags).

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster has no name; the kind doubles as the identifier
    return kind, kind
  if kind in (constants.TAG_NODE, constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
337

    
338

    
339
def _ExtendTags(opts, args):
340
  """Extend the args if a source file has been given.
341

342
  This function will extend the tags with the contents of the file
343
  passed in the 'tags_source' attribute of the opts parameter. A file
344
  named '-' will be replaced by stdin.
345

346
  """
347
  fname = opts.tags_source
348
  if fname is None:
349
    return
350
  if fname == "-":
351
    new_fh = sys.stdin
352
  else:
353
    new_fh = open(fname, "r")
354
  new_data = []
355
  try:
356
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
357
    # because of python bug 1633941
358
    while True:
359
      line = new_fh.readline()
360
      if not line:
361
        break
362
      new_data.append(line.strip())
363
  finally:
364
    new_fh.close()
365
  args.extend(new_data)
366

    
367

    
368
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  client = GetClient()
  # print the tags sorted, one per line
  for tag in sorted(client.QueryTags(kind, name)):
    ToStdout(tag)
384

    
385

    
386
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  SubmitOpCode(opcodes.OpAddTags(kind=kind, name=name, tags=args), opts=opts)
401

    
402

    
403
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  SubmitOpCode(opcodes.OpDelTags(kind=kind, name=name, tags=args), opts=opts)
418

    
419

    
420
def check_unit(option, opt, value): # pylint: disable-msg=W0613
421
  """OptParsers custom converter for units.
422

423
  """
424
  try:
425
    return utils.ParseUnit(value)
426
  except errors.UnitParseError, err:
427
    raise OptionValueError("option %s: %s" % (opt, err))
428

    
429

    
430
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  prefix '-' will have value=None, the others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif elem.startswith(NO_PREFIX):
        # "no_foo" means foo=False
        key, val = elem[len(NO_PREFIX):], False
      elif elem.startswith(UN_PREFIX):
        # "-foo" means foo=None (reset to default)
        key, val = elem[len(UN_PREFIX):], None
      else:
        # bare "foo" means foo=True
        key, val = elem, True
      if key in parsed:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      parsed[key] = val
  return parsed
465

    
466

    
467
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ""

  # removal prefixes on the ident itself ("no_foo" / "-foo") must not
  # carry any options
  for prefix, flag in ((NO_PREFIX, False), (UN_PREFIX, None)):
    if ident.startswith(prefix):
      if rest:
        raise errors.ParameterError("Cannot pass options when removing"
                                    " parameter groups: %s" % value)
      return (ident[len(prefix):], flag)

  return (ident, _SplitKeyVal(opt, rest))
493

    
494

    
495
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}; see
  L{_SplitKeyVal} for the handling of value-less keys.

  """
  return _SplitKeyVal(opt, value)
502

    
503

    
504
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
517

    
518

    
519
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: Set of all dynamic-completion markers
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES, OPT_COMPL_ONE_NODE, OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS, OPT_COMPL_ONE_IALLOCATOR, OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
538

    
539

    
540
class CliOption(Option):
  """Custom option class for optparse.

  Adds the C{completion_suggest} attribute and the custom value types
  (identkeyval, keyval, unit, bool) with their checker functions.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER.update({
    "identkeyval": check_ident_key_val,
    "keyval": check_key_val,
    "unit": check_unit,
    "bool": check_bool,
    })
558

    
559

    
560
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
562

    
563

    
564
#: Metavar used by the yes/no ("bool" type) options below
_YORNO = "yes|no"

# Generic output/behaviour options

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

# FIX: help text used to read "verify it it could be executed"
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

# Instance creation / disk options

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
722

    
723
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
724
                                dest="ignore_consistency",
725
                                action="store_true", default=False,
726
                                help="Ignore the consistency of the disks on"
727
                                " the secondary")
728

    
729
NONLIVE_OPT = cli_option("--non-live", dest="live",
730
                         default=True, action="store_false",
731
                         help="Do a non-live migration (this usually means"
732
                         " freeze the instance, save the state, transfer and"
733
                         " only then resume running on the secondary node)")
734

    
735
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
736
                                default=None,
737
                                choices=list(constants.HT_MIGRATION_MODES),
738
                                help="Override default migration mode (choose"
739
                                " either live or non-live")
740

    
741
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
742
                                help="Target node and optional secondary node",
743
                                metavar="<pnode>[:<snode>]",
744
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
745

    
746
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
747
                           action="append", metavar="<node>",
748
                           help="Use only this node (can be used multiple"
749
                           " times, if not given defaults to all nodes)",
750
                           completion_suggest=OPT_COMPL_ONE_NODE)
751

    
752
NODEGROUP_OPT = cli_option("-g", "--node-group",
753
                           dest="nodegroup",
754
                           help="Node group (name or uuid)",
755
                           metavar="<nodegroup>",
756
                           default=None, type="string",
757
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
758

    
759
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
760
                             metavar="<node>",
761
                             completion_suggest=OPT_COMPL_ONE_NODE)
762

    
763
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
764
                         action="store_false",
765
                         help="Don't start the instance after creation")
766

    
767
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
768
                         action="store_true", default=False,
769
                         help="Show command instead of executing it")
770

    
771
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
772
                         default=False, action="store_true",
773
                         help="Instead of performing the migration, try to"
774
                         " recover from a failed cleanup. This is safe"
775
                         " to run even if the instance is healthy, but it"
776
                         " will create extra replication traffic and "
777
                         " disrupt briefly the replication (like during the"
778
                         " migration")
779

    
780
STATIC_OPT = cli_option("-s", "--static", dest="static",
781
                        action="store_true", default=False,
782
                        help="Only show configuration data, not runtime data")
783

    
784
ALL_OPT = cli_option("--all", dest="show_all",
785
                     default=False, action="store_true",
786
                     help="Show info on all instances on the cluster."
787
                     " This can take a long time to run, use wisely")
788

    
789
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
790
                           action="store_true", default=False,
791
                           help="Interactive OS reinstall, lists available"
792
                           " OS templates for selection")
793

    
794
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
795
                                 action="store_true", default=False,
796
                                 help="Remove the instance from the cluster"
797
                                 " configuration even if there are failures"
798
                                 " during the removal process")
799

    
800
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
801
                                        dest="ignore_remove_failures",
802
                                        action="store_true", default=False,
803
                                        help="Remove the instance from the"
804
                                        " cluster configuration even if there"
805
                                        " are failures during the removal"
806
                                        " process")
807

    
808
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
809
                                 action="store_true", default=False,
810
                                 help="Remove the instance from the cluster")
811

    
812
# Options for disk replacement, node evacuation and instance import
# commands.  All are plain optparse options built via cli_option.

# Target node to use as the new secondary (e.g. for replace-disks).
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

# Source node/directory for instance import.
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

# NOTE: "-s" is also used by ON_SECONDARY_OPT above; the two are used by
# different commands, so the short options never clash in one parser.
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

# Inverted flag: passing --no-ssh-key-check stores False into
# opts.ssh_key_check (default True).
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")
861

    
862

    
863
# Per-node/per-volume flag options.  All use type="bool" with
# default=None — presumably None means "leave the flag unchanged";
# confirm in the commands consuming these options.

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
886

    
887
# Cluster-wide configuration options (mostly for "gnt-cluster init" and
# "gnt-cluster modify").

# Inverted flag: --no-lvm-storage stores False into opts.lvm_storage.
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

# type="keyval" is a custom cli_option type; the default is an (empty)
# dict of parsed key=value pairs.
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

# NOTE: "-C" is also used by MC_OPT; the two belong to different commands.
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

# FIX: dropped a stray trailing space in the first help fragment which
# produced a double space before "[%s]" in --help output.
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

# Inverted flags: the "--no-*" option stores False into a positively
# named destination (default True).
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
952

    
953
# Options for instance reboot/shutdown and command repetition.

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

# Inverted flag: --noshutdown stores False into opts.shutdown.
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

# FIX: corrected "repetions" -> "repetitions" in the user-visible help.
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))
981

    
982
# Options for lock handling and cluster certificate/secret renewal
# ("gnt-cluster renew-crypto" and friends).

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

# NOTE(review): default=None (not False) together with action="store_true"
# means an absent option yields None; both are falsy so behavior matches the
# sibling flags, but confirm callers before normalizing to default=False.
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

# FIX: removed duplicated word ("new new") in the user-visible help text.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))
1019

    
1020
# Miscellaneous cluster/OS options.

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# User-id pool management options (values are raw comma-separated
# strings; parsing presumably happens at the opcode level — confirm).
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volumes names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Inverted flag: --no-drbd-storage stores False into opts.drbd_storage.
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

# Valid values come from the module-level priority-name mappings
# (defined earlier in this file).
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]
1105

    
1106

    
1107
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line; NOTE(review): this appears to be expected
      to be C{sys.argv} itself, since it is mutated below via C{argv.pop(1)}
      and then optparse re-reads C{sys.argv} — confirm with callers
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @return: (function, options, args) on success; (None, None, None) when
      no valid command was given or argument checking failed

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    # use only the basename of the invoked program for messages
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing: print a usage summary with all known commands
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # the fifth element of a command definition is its help text
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # NOTE(review): parse_args() is called without arguments, so optparse
  # parses sys.argv[1:]; this only excludes the command popped above
  # because "argv" is sys.argv itself — confirm before reusing this
  # function with an independent list.
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1184

    
1185

    
1186
def _CheckArguments(cmd, args_def, args):
1187
  """Verifies the arguments using the argument definition.
1188

1189
  Algorithm:
1190

1191
    1. Abort with error if values specified by user but none expected.
1192

1193
    1. For each argument in definition
1194

1195
      1. Keep running count of minimum number of values (min_count)
1196
      1. Keep running count of maximum number of values (max_count)
1197
      1. If it has an unlimited number of values
1198

1199
        1. Abort with error if it's not the last argument in the definition
1200

1201
    1. If last argument has limited number of values
1202

1203
      1. Abort with error if number of values doesn't match or is too large
1204

1205
    1. Abort with error if user didn't pass enough values (min_count)
1206

1207
  """
1208
  if args and not args_def:
1209
    ToStderr("Error: Command %s expects no arguments", cmd)
1210
    return False
1211

    
1212
  min_count = None
1213
  max_count = None
1214
  check_max = None
1215

    
1216
  last_idx = len(args_def) - 1
1217

    
1218
  for idx, arg in enumerate(args_def):
1219
    if min_count is None:
1220
      min_count = arg.min
1221
    elif arg.min is not None:
1222
      min_count += arg.min
1223

    
1224
    if max_count is None:
1225
      max_count = arg.max
1226
    elif arg.max is not None:
1227
      max_count += arg.max
1228

    
1229
    if idx == last_idx:
1230
      check_max = (arg.max is not None)
1231

    
1232
    elif arg.max is None:
1233
      raise errors.ProgrammerError("Only the last argument can have max=None")
1234

    
1235
  if check_max:
1236
    # Command with exact number of arguments
1237
    if (min_count is not None and max_count is not None and
1238
        min_count == max_count and len(args) != min_count):
1239
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1240
      return False
1241

    
1242
    # Command with limited number of arguments
1243
    if max_count is not None and len(args) > max_count:
1244
      ToStderr("Error: Command %s expects only %d argument(s)",
1245
               cmd, max_count)
1246
      return False
1247

    
1248
  # Command with some required arguments
1249
  if min_count is not None and len(args) < min_count:
1250
    ToStderr("Error: Command %s expects at least %d argument(s)",
1251
             cmd, min_count)
1252
    return False
1253

    
1254
  return True
1255

    
1256

    
1257
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split on the first colon.  Note
  that, matching the historical behavior, the colon case returns a
  *list* of two elements while the no-colon case returns a tuple of
  (value, None).

  """
  if not value or ':' not in value:
    return (value, None)
  head, tail = value.split(':', 1)
  return [head, tail]
1265

    
1266

    
1267
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names ("name+variant" for each variant, or just
      the base name when there are no variants)

  """
  if not os_variants:
    return [os_name]
  names = []
  for variant in os_variants:
    names.append('%s+%s' % (os_name, variant))
  return names
1282

    
1283

    
1284
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default list when nothing was selected; the default list
      extended by the comma-separated extras when the selection starts
      with "+"; otherwise just the comma-separated selection

  """
  if selected is None:
    return default

  # A leading "+" means "default fields plus these"
  if selected[:1] == "+":
    return default + selected[1:].split(",")

  return selected.split(",")
1300

    
1301

    
1302
# Alias for rpc.RunWithRPC under the historical CLI name; presumably a
# decorator that sets up the RPC layer around the wrapped call — see rpc.py.
UsesRPC = rpc.RunWithRPC
1303

    
1304

    
1305
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  # validate the choices structure: non-empty list of >=3-tuples whose
  # input character is not the reserved '?'
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last entry doubles as the default answer (used when no tty)
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    # wrap each line to 70 columns, keeping internal whitespace
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # Python 2 'file' builtin; open the controlling tty for read+append
    f = file("/dev/tty", "a+")
  except IOError:
    # no tty available, fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # mark the default (last) choice with brackets, e.g. "y/[n]/?"
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      # read at most 2 characters (answer char + newline)
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        # print the per-choice descriptions and re-prompt
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1360

    
1361

    
1362
class JobSubmittedException(Exception):
  """Raised when a job was submitted and the client should exit.

  Carries a single argument: the ID of the job that was submitted.
  Handlers are expected to print that ID.  This is a structured way of
  exiting from a client, not an error condition.

  """
1371

    
1372

    
1373
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes making up a single job
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the newly submitted job

  """
  if cl is None:
    cl = GetClient()
  return cl.SubmitJob(ops)
1389

    
1390

    
1391
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes via the data callbacks, forwarding
  log messages and not-changed notifications to the reporting callbacks,
  until the job reaches a final status; then queries the job once more
  and either returns its result or raises an error describing the
  failure.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the job's "opresult" field on success
  @raise errors.JobLost: if the job disappears while polling
  @raise errors.OpExecError: if the job was canceled or failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE: prev_logmsg_serial starts as None; max(None, serial)
        # relies on Python 2 ordering where None sorts below integers
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: because of the elif, when log entries were delivered the
    # final-status check is deferred to the next loop iteration
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # the job reached a final status; fetch its detailed outcome
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the first failed opcode and report it, noting whether any
  # earlier opcode had succeeded (partial failure)
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      # re-raise an encapsulated exception if the message holds one
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1467

    
1468

    
1469
class JobPollCbBase:
  """Abstract data-callback interface for L{GenericPollJob}.

  Subclasses supply the actual transport for waiting on and querying
  jobs.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Must be implemented by subclasses.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
1495

    
1496

    
1497
class JobPollReportCbBase:
  """Abstract reporting interface for L{GenericPollJob}.

  Subclasses decide how log messages and "nothing changed" events are
  presented to the user.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a single job log message.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    Must be implemented by subclasses.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1522

    
1523

    
1524
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation delegating to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: the luxi client all calls are forwarded to

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job (forwarded to the luxi client).

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields, prev_job_info,
                                        prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs (forwarded).

    """
    return self.cl.QueryJobs(job_ids, fields)
1545

    
1546

    
1547
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callback which forwards log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with one
        (timestamp, log_type, log_msg) tuple per reported log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Passes a single log message on to the feedback function.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while; deliberately a no-op.

    """
    # Ignore
1569

    
1570

    
1571
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callback printing job progress to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # one-shot flags so each notice below is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a formatted log message with its wall-clock timestamp.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Prints a one-time notice for jobs stuck in queue or on locks.

    """
    if status is None:
      return

    notices = [
      (constants.JOB_STATUS_QUEUED, "notified_queued",
       "Job %s is waiting in queue"),
      (constants.JOB_STATUS_WAITLOCK, "notified_waitlock",
       "Job %s is trying to acquire all necessary locks"),
      ]
    for wanted_status, flag, text in notices:
      if status == wanted_status and not getattr(self, flag):
        ToStderr(text, job_id)
        setattr(self, flag, True)
        break
1602

    
1603

    
1604
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Messages that are not plain ELOG_MESSAGE payloads are stringified
  first; the result is always passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
1612

    
1613

    
1614
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: callable receiving log messages; mutually
      exclusive with C{reporter}
  @param reporter: L{JobPollReportCbBase} instance to report through
  @raise errors.ProgrammerError: if both feedback_fn and reporter are given

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1636

    
1637

    
1638
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  Submits the single opcode as a job, waits for it and returns its
  (only) result.

  """
  if cl is None:
    cl = GetClient()

  # apply generic command line options (debug, dry-run, priority)
  SetGenericOpcodeOpts([op], opts)

  results = PollJob(SendJob([op], cl=cl), cl=cl,
                    feedback_fn=feedback_fn, reporter=reporter)

  # one opcode was submitted, so there is exactly one result
  return results[0]
1657

    
1658

    
1659
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: with the job id, when '--submit' was
      given (this is how the caller learns the id)

  """
  if not (opts and opts.submit_only):
    # normal path: submit and wait for the result
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  ops = [op]
  SetGenericOpcodeOpts(ops, opts)
  raise JobSubmittedException(SendJob(ops, cl=cl))
1678

    
1679

    
1680
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # hoist the option lookups out of the loop; they are invariant
  has_dry_run = hasattr(options, "dry_run")
  priority = getattr(options, "priority", None)

  for op in opcode_list:
    op.debug_level = options.debug
    if has_dry_run:
      op.dry_run = options.dry_run
    if priority is not None:
      op.priority = _PRIONAME_TO_VALUE[priority]
1699

    
1700

    
1701
def GetClient():
  """Connects to the master daemon and returns a luxi client.

  When no master daemon answers, ssconf is consulted to tell apart
  "cluster not initialized" from "running on the wrong node", raising
  an appropriate L{errors.OpPrereqError} in those cases; otherwise the
  original L{luxi.NoMasterError} is re-raised.

  """
  # TODO: Cache object?
  try:
    return luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)

    # cluster exists and we are the master: the daemon is really down
    raise
1722

    
1723

    
1724
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # default exit code; overridden to 2 for corrupt configuration and to
  # 0 for JobSubmittedException (which signals success of '--submit')
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: branch order matters below; the more specific error classes
  # must be tested before the catch-all errors.GenericError branch
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish resolving our own hostname from resolving other names
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # two-argument form carries (details, error type)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: '--submit' was used, report the job id and succeed
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  # strip trailing newlines to honour the "not newline-terminated" contract
  return retcode, obuf.getvalue().rstrip('\n')
1804

    
1805

    
1806
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      # include the sub-command (e.g. "gnt-instance list") in the name
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    # NOTE(review): FormatError's suggested exit code is discarded here;
    # parameter errors always exit with code 1
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # apply caller-supplied option overrides after parsing
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    # known error types: log the traceback, print the formatted message
    # and return the exit code recommended by FormatError
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
1864

    
1865

    
1866
def ParseNicOption(optvalue):
1867
  """Parses the value of the --net option(s).
1868

1869
  """
1870
  try:
1871
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1872
  except (TypeError, ValueError), err:
1873
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1874

    
1875
  nics = [{}] * nic_max
1876
  for nidx, ndict in optvalue:
1877
    nidx = int(nidx)
1878

    
1879
    if not isinstance(ndict, dict):
1880
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1881
                                 " got %s" % (nidx, ndict))
1882

    
1883
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1884

    
1885
    nics[nidx] = ndict
1886

    
1887
  return nics
1888

    
1889

    
1890
def GenericInstanceCreate(mode, opts, args):
1891
  """Add an instance to the cluster via either creation or import.
1892

1893
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1894
  @param opts: the command line options selected by the user
1895
  @type args: list
1896
  @param args: should contain only one element, the new instance name
1897
  @rtype: int
1898
  @return: the desired exit code
1899

1900
  """
1901
  instance = args[0]
1902

    
1903
  (pnode, snode) = SplitNodeOption(opts.node)
1904

    
1905
  hypervisor = None
1906
  hvparams = {}
1907
  if opts.hypervisor:
1908
    hypervisor, hvparams = opts.hypervisor
1909

    
1910
  if opts.nics:
1911
    nics = ParseNicOption(opts.nics)
1912
  elif opts.no_nics:
1913
    # no nics
1914
    nics = []
1915
  elif mode == constants.INSTANCE_CREATE:
1916
    # default of one nic, all auto
1917
    nics = [{}]
1918
  else:
1919
    # mode == import
1920
    nics = []
1921

    
1922
  if opts.disk_template == constants.DT_DISKLESS:
1923
    if opts.disks or opts.sd_size is not None:
1924
      raise errors.OpPrereqError("Diskless instance but disk"
1925
                                 " information passed")
1926
    disks = []
1927
  else:
1928
    if (not opts.disks and not opts.sd_size
1929
        and mode == constants.INSTANCE_CREATE):
1930
      raise errors.OpPrereqError("No disk information specified")
1931
    if opts.disks and opts.sd_size is not None:
1932
      raise errors.OpPrereqError("Please use either the '--disk' or"
1933
                                 " '-s' option")
1934
    if opts.sd_size is not None:
1935
      opts.disks = [(0, {"size": opts.sd_size})]
1936

    
1937
    if opts.disks:
1938
      try:
1939
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1940
      except ValueError, err:
1941
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
1942
      disks = [{}] * disk_max
1943
    else:
1944
      disks = []
1945
    for didx, ddict in opts.disks:
1946
      didx = int(didx)
1947
      if not isinstance(ddict, dict):
1948
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1949
        raise errors.OpPrereqError(msg)
1950
      elif "size" in ddict:
1951
        if "adopt" in ddict:
1952
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
1953
                                     " (disk %d)" % didx)
1954
        try:
1955
          ddict["size"] = utils.ParseUnit(ddict["size"])
1956
        except ValueError, err:
1957
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
1958
                                     (didx, err))
1959
      elif "adopt" in ddict:
1960
        if mode == constants.INSTANCE_IMPORT:
1961
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
1962
                                     " import")
1963
        ddict["size"] = 0
1964
      else:
1965
        raise errors.OpPrereqError("Missing size or adoption source for"
1966
                                   " disk %d" % didx)
1967
      disks[didx] = ddict
1968

    
1969
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
1970
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
1971

    
1972
  if mode == constants.INSTANCE_CREATE:
1973
    start = opts.start
1974
    os_type = opts.os
1975
    force_variant = opts.force_variant
1976
    src_node = None
1977
    src_path = None
1978
    no_install = opts.no_install
1979
    identify_defaults = False
1980
  elif mode == constants.INSTANCE_IMPORT:
1981
    start = False
1982
    os_type = None
1983
    force_variant = False
1984
    src_node = opts.src_node
1985
    src_path = opts.src_dir
1986
    no_install = None
1987
    identify_defaults = opts.identify_defaults
1988
  else:
1989
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
1990

    
1991
  op = opcodes.OpCreateInstance(instance_name=instance,
1992
                                disks=disks,
1993
                                disk_template=opts.disk_template,
1994
                                nics=nics,
1995
                                pnode=pnode, snode=snode,
1996
                                ip_check=opts.ip_check,
1997
                                name_check=opts.name_check,
1998
                                wait_for_sync=opts.wait_for_sync,
1999
                                file_storage_dir=opts.file_storage_dir,
2000
                                file_driver=opts.file_driver,
2001
                                iallocator=opts.iallocator,
2002
                                hypervisor=hypervisor,
2003
                                hvparams=hvparams,
2004
                                beparams=opts.beparams,
2005
                                osparams=opts.osparams,
2006
                                mode=mode,
2007
                                start=start,
2008
                                os_type=os_type,
2009
                                force_variant=force_variant,
2010
                                src_node=src_node,
2011
                                src_path=src_path,
2012
                                no_install=no_install,
2013
                                identify_defaults=identify_defaults)
2014

    
2015
  SubmitOrSend(op, opts)
2016
  return 0
2017

    
2018

    
2019
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used to reach the non-master nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    # all online nodes except the master; commands for these go over SSH
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; C{None} (or the master node's name)
        runs the command locally, anything else goes via SSH as root
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    Blocks the watcher, stops the master daemons first and then all
    daemons on every online node, runs C{fn}, and restarts everything
    (master node last) even if C{fn} raises.

    @type fn: callable
    @param fn: Function to be called; receives this helper instance as
        its first argument, followed by C{args}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # log and report the failure, but still restart the cluster
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2112

    
2113

    
2114
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # Connecting also verifies that we are running on the master daemon
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])

  nodes = GetOnlineNodes([], cl=client)

  # Drop the client reference; the master daemon is about to go away
  del client

  assert master_node in nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, nodes)
  return helper.Call(fn, *args)
2140

    
2141

    
2142
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output;
      note that the rows are modified in place (values are stringified)
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list
  @return: the formatted table, one string per output line

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # build one %-format placeholder per column
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric columns are right-aligned with a dynamic width
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    # escape '%' in the separator so it survives the final interpolation
    format_str = separator.replace("%", "%%").join(format_fields)

  # first pass: convert all values to strings (mutating the rows) and,
  # when auto-aligning, track the maximum width of each column
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # headers also contribute to the column widths
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # avoid trailing whitespace: don't pad the last column unless it is
    # right-aligned (numeric)
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # second pass: render the data rows (None rows become all dashes)
  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2248

    
2249

    
2250
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or '?' if the input
      is not a two-element tuple/list

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usec) = ts
    return "%s.%06d" % (time.strftime("%F %T", time.localtime(sec)), usec)
  return '?'
2264

    
2265

    
2266
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @return: the number of seconds as an integer
  @raise errors.OpPrereqError: on an empty or unparseable specification

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }

  suffix = value[-1]
  if suffix in suffix_map:
    multiplier = suffix_map[suffix]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      return int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)

  # no recognized suffix: plain number of seconds
  try:
    return int(value)
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2306

    
2307

    
2308
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
  else:
    master_node = None

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)

  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))

  online = []
  for (name, is_offline, sip) in result:
    if is_offline:
      continue
    if master_node is not None and name == master_node:
      continue
    if secondary_ips:
      online.append(sip)
    else:
      online.append(name)
  return online
2352

    
2353

    
2354
def _ToStream(stream, txt, *args):
2355
  """Write a message to a stream, bypassing the logging system
2356

2357
  @type stream: file object
2358
  @param stream: the file to which we should write
2359
  @type txt: str
2360
  @param txt: the message
2361

2362
  """
2363
  if args:
2364
    args = tuple(args)
2365
    stream.write(txt % args)
2366
  else:
2367
    stream.write(txt)
2368
  stream.write('\n')
2369
  stream.flush()
2370

    
2371

    
2372
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
2382

    
2383

    
2384
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
2394

    
2395

    
2396
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  Internally two parallel lists are kept: C{self.queue} holds
  C{(name, ops)} tuples of not-yet-submitted jobs, and C{self.jobs}
  holds C{(idx, status, data, name)} tuples for submitted jobs, where
  C{idx} is the original queue position, C{status} the submission
  success flag and C{data} either the job id (on success) or the error
  message (on failure).

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # pending (name, ops) tuples, filled by QueueJob()
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # (idx, status, data, name) tuples, filled by SubmitPending()
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    # apply generic options (e.g. priority/dry-run) to all opcodes
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit jobs one by one via SubmitJob instead
        of a single SubmitManyJobs call

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # pair each submission result with its queue entry, keeping the
    # original queue position as idx so final results can be re-ordered
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    Removes the chosen entry from C{self.jobs} and returns it.

    @return: an C{(idx, status, job_id, name)} tuple from C{self.jobs}

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      # NOTE(review): a non-list status presumably means the job is no
      # longer known to the master (lost/archived) -- treated as a
      # candidate below; confirm against QueryJobs semantics
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found: all are still queued/waiting, so just poll the first
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # auto-submit if the caller never called SubmitPending() explicitly
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      # row[1] is the submission status, row[2] the job id
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      # for failed submissions, jid holds the error message
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    @return: job results (as per L{GetResults}) if waiting, otherwise
        a list of (status, job_id_or_error) tuples

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]