#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
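
# Illustrative note (not part of the original source): with the names above,
# _PRIONAME_TO_VALUE maps the user-visible strings to the opcode constants,
# e.g. _PRIONAME_TO_VALUE["high"] is constants.OP_PRIO_HIGH; PRIORITY_OPT
# below uses its keys as the allowed choices.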


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
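
# Illustrative example (not part of the original source) of how the prefixes
# documented above are handled; the option name is only used for error
# messages:
#
#   _SplitKeyVal("--backend", "vcpus=4,no_acpi,-kernel_path")
#   => {'vcpus': '4', 'acpi': False, 'kernel_path': None}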


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
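
# Illustrative examples (not from the original source) of the tuple format
# produced above, e.g. for "--net"-style options used with action="append";
# the first argument is unused:
#
#   check_ident_key_val(None, "--net", "0:link=br0,mode=bridged")
#   => ("0", {'link': 'br0', 'mode': 'bridged'})
#   check_ident_key_val(None, "--net", "no_1")
#   => ("1", False)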


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
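
# Illustrative example (not part of the original module): the extra types
# registered above can be used like any other optparse type, e.g. a
# hypothetical option
#
#   EXAMPLE_OPT = cli_option("--example-size", dest="example_size",
#                            type="unit", default=None,
#                            help="A size accepting suffixes such as 512M")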


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. The "add" and "import" commands then
# add their own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
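
# Illustrative note (not from the original source): per the comment above,
# instance-creation commands are expected to build their option list as
# something like "COMMON_CREATE_OPTS + [SRC_NODE_OPT, SRC_DIR_OPT]" for an
# import-style command.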


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
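
# Illustrative sketch (not part of the original module) of the "commands"
# dictionary format unpacked above: each value is a tuple of
# (function, argument definition, option list, usage string, description),
# for example:
#
#   commands = {
#     "list": (ListTags, ARGS_NONE, [PRIORITY_OPT], "", "List the tags"),
#     }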


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
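
# Illustrative examples (not from the original source); note the first form
# yields a list and the second a tuple:
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#   => ['node1.example.com', 'node2.example.com']
#   SplitNodeOption("node1.example.com")
#   => ('node1.example.com', None)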


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
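
# Illustrative example (not from the original source):
#
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#   => ['debootstrap+default', 'debootstrap+minimal']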


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
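
# Illustrative examples (not from the original source), with
# default = ["name", "status"]:
#
#   ParseFields(None, default)          => ["name", "status"]
#   ParseFields("+oper_ram", default)   => ["name", "status", "oper_ram"]
#   ParseFields("name,pnode", default)  => ["name", "pnode"]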


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
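
# Illustrative example (not part of the original module) of a custom choices
# list as described in the docstring above; the last entry doubles as the
# default when no tty is available:
#
#   AskUser("Reboot, shutdown or cancel?",
#           [("r", "reboot", "Reboot the instance"),
#            ("s", "shutdown", "Shut the instance down"),
#            ("c", None, "Cancel the operation")])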
1389

    
1390

    
1391
class JobSubmittedException(Exception):
1392
  """Job was submitted, client should exit.
1393

1394
  This exception has one argument, the ID of the job that was
1395
  submitted. The handler should print this ID.
1396

1397
  This is not an error, just a structured way to exit from clients.
1398

1399
  """
1400

    
1401

    
1402
def SendJob(ops, cl=None):
  """Function to submit a list of opcodes as a single job, without waiting
  for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


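# Illustrative sketch for SendJob()/PollJob() above; "op" stands for any
# opcode instance built by the caller:
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)   # one result per opcode in the job
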
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


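# Illustrative sketch for SubmitOrSend() above, as called from a command
# function; "op" is any opcode and "opts" are the parsed command line options:
#   def SomeCommand(opts, args):
#     op = ...                   # build the opcode from opts/args
#     SubmitOrSend(op, opts)     # waits for the job, or raises
#     return 0                   # JobSubmittedException under --submit
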
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


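# Illustrative sketch of the typical caller pattern for FormatError() above
# (GenericMain() below follows the same structure):
#   try:
#     ...
#   except errors.GenericError, err:
#     retcode, msg = FormatError(err)
#     ToStderr(msg)
#     sys.exit(retcode)
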
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics


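# Illustrative example for ParseNicOption() above; the option value is a list
# of (index, parameters) pairs, and the parameter keys/values shown here are
# hypothetical:
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mac": "auto"})])
#   => [{"link": "br0"}, {"mac": "auto"}]
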
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management.

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


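# Illustrative sketch for RunWhileClusterStopped() above. The callback gets
# the helper instance as its first argument and can use its _RunCmd() method
# to run commands while the daemons are down ("_DoSomething" is hypothetical):
#   def _DoSomething(helper):
#     helper._RunCmd(None, ["hostname"])   # None means "run on the master"
#   RunWhileClusterStopped(ToStdout, _DoSomething)
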
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list
  @return: a list of strings, one per formatted output line

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


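# Illustrative example for GenerateTable() above; field names, headers and
# data are hypothetical:
#   lines = GenerateTable(headers={"name": "Node", "dtotal": "DTotal"},
#                         fields=["name", "dtotal"],
#                         separator=None,
#                         data=[["node1", 10240], ["node2", 20480]],
#                         numfields=["dtotal"],
#                         unitfields=["dtotal"])
#   for line in lines:
#     ToStdout(line)
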
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


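# Illustrative example for FormatTimestamp() above (the date/time part
# depends on the local timezone):
#   FormatTimestamp((1234567890, 123456))
#   => '2009-02-13 23:31:30.123456' (when run in UTC)
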
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value


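# Illustrative example for ParseTimespec() above:
#   ParseTimespec("30")   => 30       (no suffix: plain seconds)
#   ParseTimespec("2h")   => 7200
#   ParseTimespec("1w")   => 604800
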
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of any
  offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


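# Illustrative example for ToStdout()/ToStderr() above; extra arguments are
# applied with the "%" operator, so callers pass a format string plus values:
#   ToStdout("Submitted job %s", job_id)
#   ToStderr("Failure for %s: %s", name, result)
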
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
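

# Illustrative usage sketch for JobExecutor above; "op1"/"op2" stand for
# opcodes built by the caller and "opts" are the parsed command line options:
#   je = JobExecutor(opts=opts)
#   je.QueueJob("instance1.example.com", op1)
#   je.QueueJob("instance2.example.com", op2)
#   results = je.GetResults()        # submits pending jobs and waits
#   for success, result in results:
#     ...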