#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
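

# Illustrative note (not part of the original module): for a cluster-level
# command this returns (constants.TAG_CLUSTER, constants.TAG_CLUSTER), while
# for node/instance commands it pops the first positional argument and
# returns e.g. (constants.TAG_NODE, "node1.example.com") -- the node name
# here is a hypothetical example.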


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
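

# Minimal usage sketch (assumed, not taken from the original code): the tag
# helpers only need an options object that carries "tag_type" and
# "tags_source", for example
#
#   import optparse
#   opts = optparse.Values({"tag_type": constants.TAG_NODE,
#                           "tags_source": None})
#   ListTags(opts, ["node1.example.com"])   # hypothetical node name
#
# which would query the master for that node's tags and print them via
# ToStdout.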


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
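

# Example (assumption about utils.ParseUnit semantics: values are returned in
# MiB, with m/g/t suffixes accepted): an option declared with type="unit"
# turns "512" into 512 and "4G" into 4096, while an unparseable value such as
# "4x" is reported to the user through OptionValueError.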


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
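

# Example (illustrative parameter names):
#   _SplitKeyVal("--hypervisor", "vcpus=2,no_acpi,-kernel_path")
# returns {"vcpus": "2", "acpi": False, "kernel_path": None}; a bare flag such
# as "acpi" (no prefix and no "=") would map to True instead.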


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
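

# Example (illustrative): with type="identkeyval" a value such as
# "plain:size=4G,mode=rw" parses to ("plain", {"size": "4G", "mode": "rw"}),
# while "no_link0" parses to ("link0", False), signalling removal of that
# parameter group.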


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
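

# Sketch of how the custom types above are meant to be used (illustrative,
# mirroring the real definitions further down in this module; "EXAMPLE_OPT"
# is hypothetical):
#
#   EXAMPLE_OPT = cli_option("--example", dest="example", type="keyval",
#                            default={}, help="key=value pairs")
#
# With type="keyval" the parsed value is the dict produced by check_key_val,
# and completion_suggest may be set to one of the OPT_COMPL_* constants to
# drive shell completion.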


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
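

# Illustrative sketch (not from the original module): a client command that
# creates instances would typically combine this list with its own specific
# options when building its command table entry, e.g.
# COMMON_CREATE_OPTS + [OS_OPT, FORCE_VARIANT_OPT].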


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
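

# Illustrative sketch of the "commands" dictionary expected above (the real
# entries live in the gnt-* client scripts, not in this module): each value
# is a tuple (function, args_def, parser_opts, usage, description), e.g.
#
#   commands = {
#     "list-tags": (ListTags, ARGS_NONE, [], "", "List the cluster tags"),
#     }
#
# The entry shown is hypothetical; _ParseArgs() unpacks exactly these five
# fields, and _CheckArguments() below validates the positional arguments
# against args_def.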


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
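

# Example (illustrative): with args_def=[ArgInstance(min=1, max=1)] (i.e.
# ARGS_ONE_INSTANCE), _CheckArguments accepts exactly one positional argument;
# an empty argument list or two arguments make it print an error via ToStderr
# and return False.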


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
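

# Example (node names are illustrative): "node1:node2" is split into
# ["node1", "node2"] (primary, secondary), while a plain "node1" yields
# ("node1", None).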


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
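

# Example (OS and variant names are illustrative):
#   CalculateOSNames("debootstrap", ["default", "minimal"])
# returns ["debootstrap+default", "debootstrap+minimal"]; with
# os_variants=None the result is just ["debootstrap"].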


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
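

# Example (field names are illustrative): ParseFields(None, ["name", "os"])
# returns the default list unchanged, ParseFields("+mem", ["name", "os"])
# appends to it giving ["name", "os", "mem"], and ParseFields("name,mem", ...)
# replaces the defaults entirely.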


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
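

# Usage sketch (illustrative): the default prompt is a y/n question, but
# custom choices work too, e.g.
#
#   AskUser("Reboot, shutdown or cancel?",
#           [("r", "reboot", "Reboot the instance"),
#            ("s", "shutdown", "Shut the instance down"),
#            ("c", None, "Cancel")])
#
# Without a TTY the last entry's return value (here None) is returned
# directly.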
1384

    
1385

    
1386
class JobSubmittedException(Exception):
1387
  """Job was submitted, client should exit.
1388

1389
  This exception has one argument, the ID of the job that was
1390
  submitted. The handler should print this ID.
1391

1392
  This is not an error, just a structured way to exit from clients.
1393

1394
  """
1395

    
1396

    
1397
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
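

# Illustrative sketch of a reporting callback: a minimal JobPollReportCbBase
# subclass that could be passed to GenericPollJob (or to PollJob via its
# 'reporter' argument). It merely counts log messages instead of printing.
class _ExampleCountingReportCb(JobPollReportCbBase):
  def __init__(self):
    JobPollReportCbBase.__init__(self)
    self.messages = 0

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    # Count the message instead of formatting it
    self.messages += 1

  def ReportNotChanged(self, job_id, status):
    # Stay silent while the job is queued or waiting for locks
    pass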


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
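

# Illustrative sketch: FormatError maps an exception to an (exit code,
# message) pair; the error text below is made up.
def _ExampleFormatError():
  """Example only: format a prerequisite error."""
  retcode, msg = FormatError(errors.OpPrereqError("instance is not stopped"))
  # retcode is 1 and msg starts with "Failure: prerequisites not met"
  return retcode, msg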


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
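

# Illustrative sketch of the structure ParseNicOption expects from the
# "--net" option(s): a list of (index, parameters) pairs; the link and mode
# values below are placeholders.
def _ExampleParseNicOption():
  """Example only: build the NIC list for two NICs."""
  optvalue = [("0", {constants.NIC_LINK: "br0"}),
              ("1", {constants.NIC_MODE: constants.NIC_MODE_ROUTED})]
  # Returns one parameter dictionary per NIC, indexed by NIC number
  return ParseNicOption(optvalue)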


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
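

# Illustrative sketch of GenerateTable with automatic column widths; the
# field and header names are invented for the example.
def _ExampleGenerateTable():
  """Example only: return the formatted lines of a two-column table."""
  headers = {"name": "Name", "size": "Size"}
  fields = ["name", "size"]
  data = [["disk0", 1024], ["disk1", 512]]
  # separator=None selects the 'smart' width algorithm; "size" is
  # right-aligned because it is listed in numfields
  return GenerateTable(headers, fields, None, data, numfields=["size"])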


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
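

# Illustrative sketch: a few ParseTimespec conversions, all in seconds.
def _ExampleParseTimespec():
  """Example only: suffixed and plain time specifications."""
  assert ParseTimespec("30") == 30
  assert ParseTimespec("2m") == 2 * 60
  assert ParseTimespec("1h") == 3600
  assert ParseTimespec("1w") == 7 * 86400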


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
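

# Illustrative sketch of typical JobExecutor usage from a gnt-* command; the
# (name, opcodes) pairs are assumed to be built by the caller.
def _ExampleJobExecutorUsage(job_specs, opts):
  """Example only: submit several jobs and wait for all of them.

  @param job_specs: list of (name, [opcode, ...]) tuples prepared elsewhere
  @param opts: parsed command line options, or None

  """
  jex = JobExecutor(opts=opts)
  for name, ops in job_specs:
    jex.QueueJob(name, *ops)
  # GetResults() submits any pending jobs, polls them and returns a list of
  # (success, result-or-error) tuples in submission order
  return jex.GetResults()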