lib/cli.py @ a285fcfd

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42

    
43
from optparse import (OptionParser, TitledHelpFormatter,
44
                      Option, OptionValueError)
45

    
46

    
47
__all__ = [
48
  # Command line options
49
  "ADD_UIDS_OPT",
50
  "ALLOCATABLE_OPT",
51
  "ALL_OPT",
52
  "AUTO_PROMOTE_OPT",
53
  "AUTO_REPLACE_OPT",
54
  "BACKEND_OPT",
55
  "BLK_OS_OPT",
56
  "CLEANUP_OPT",
57
  "CLUSTER_DOMAIN_SECRET_OPT",
58
  "CONFIRM_OPT",
59
  "CP_SIZE_OPT",
60
  "DEBUG_OPT",
61
  "DEBUG_SIMERR_OPT",
62
  "DISKIDX_OPT",
63
  "DISK_OPT",
64
  "DISK_TEMPLATE_OPT",
65
  "DRAINED_OPT",
66
  "DRY_RUN_OPT",
67
  "DRBD_HELPER_OPT",
68
  "EARLY_RELEASE_OPT",
69
  "ENABLED_HV_OPT",
70
  "ERROR_CODES_OPT",
71
  "FIELDS_OPT",
72
  "FILESTORE_DIR_OPT",
73
  "FILESTORE_DRIVER_OPT",
74
  "FORCE_OPT",
75
  "FORCE_VARIANT_OPT",
76
  "GLOBAL_FILEDIR_OPT",
77
  "HID_OS_OPT",
78
  "HVLIST_OPT",
79
  "HVOPTS_OPT",
80
  "HYPERVISOR_OPT",
81
  "IALLOCATOR_OPT",
82
  "DEFAULT_IALLOCATOR_OPT",
83
  "IDENTIFY_DEFAULTS_OPT",
84
  "IGNORE_CONSIST_OPT",
85
  "IGNORE_FAILURES_OPT",
86
  "IGNORE_OFFLINE_OPT",
87
  "IGNORE_REMOVE_FAILURES_OPT",
88
  "IGNORE_SECONDARIES_OPT",
89
  "IGNORE_SIZE_OPT",
90
  "INTERVAL_OPT",
91
  "MAC_PREFIX_OPT",
92
  "MAINTAIN_NODE_HEALTH_OPT",
93
  "MASTER_NETDEV_OPT",
94
  "MC_OPT",
95
  "MIGRATION_MODE_OPT",
96
  "NET_OPT",
97
  "NEW_CLUSTER_CERT_OPT",
98
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
99
  "NEW_CONFD_HMAC_KEY_OPT",
100
  "NEW_RAPI_CERT_OPT",
101
  "NEW_SECONDARY_OPT",
102
  "NIC_PARAMS_OPT",
103
  "NODE_LIST_OPT",
104
  "NODE_PLACEMENT_OPT",
105
  "NODEGROUP_OPT",
106
  "NODRBD_STORAGE_OPT",
107
  "NOHDR_OPT",
108
  "NOIPCHECK_OPT",
109
  "NO_INSTALL_OPT",
110
  "NONAMECHECK_OPT",
111
  "NOLVM_STORAGE_OPT",
112
  "NOMODIFY_ETCHOSTS_OPT",
113
  "NOMODIFY_SSH_SETUP_OPT",
114
  "NONICS_OPT",
115
  "NONLIVE_OPT",
116
  "NONPLUS1_OPT",
117
  "NOSHUTDOWN_OPT",
118
  "NOSTART_OPT",
119
  "NOSSH_KEYCHECK_OPT",
120
  "NOVOTING_OPT",
121
  "NWSYNC_OPT",
122
  "ON_PRIMARY_OPT",
123
  "ON_SECONDARY_OPT",
124
  "OFFLINE_OPT",
125
  "OSPARAMS_OPT",
126
  "OS_OPT",
127
  "OS_SIZE_OPT",
128
  "PRIMARY_IP_VERSION_OPT",
129
  "PRIORITY_OPT",
130
  "RAPI_CERT_OPT",
131
  "READD_OPT",
132
  "REBOOT_TYPE_OPT",
133
  "REMOVE_INSTANCE_OPT",
134
  "REMOVE_UIDS_OPT",
135
  "RESERVED_LVS_OPT",
136
  "ROMAN_OPT",
137
  "SECONDARY_IP_OPT",
138
  "SELECT_OS_OPT",
139
  "SEP_OPT",
140
  "SHOWCMD_OPT",
141
  "SHUTDOWN_TIMEOUT_OPT",
142
  "SINGLE_NODE_OPT",
143
  "SRC_DIR_OPT",
144
  "SRC_NODE_OPT",
145
  "SUBMIT_OPT",
146
  "STATIC_OPT",
147
  "SYNC_OPT",
148
  "TAG_SRC_OPT",
149
  "TIMEOUT_OPT",
150
  "UIDPOOL_OPT",
151
  "USEUNITS_OPT",
152
  "USE_REPL_NET_OPT",
153
  "VERBOSE_OPT",
154
  "VG_NAME_OPT",
155
  "YES_DOIT_OPT",
156
  # Generic functions for CLI programs
157
  "GenericMain",
158
  "GenericInstanceCreate",
159
  "GetClient",
160
  "GetOnlineNodes",
161
  "JobExecutor",
162
  "JobSubmittedException",
163
  "ParseTimespec",
164
  "RunWhileClusterStopped",
165
  "SubmitOpCode",
166
  "SubmitOrSend",
167
  "UsesRPC",
168
  # Formatting functions
169
  "ToStderr", "ToStdout",
170
  "FormatError",
171
  "GenerateTable",
172
  "AskUser",
173
  "FormatTimestamp",
174
  "FormatLogMessage",
175
  # Tags functions
176
  "ListTags",
177
  "AddTags",
178
  "RemoveTags",
179
  # command line options support infrastructure
180
  "ARGS_MANY_INSTANCES",
181
  "ARGS_MANY_NODES",
182
  "ARGS_NONE",
183
  "ARGS_ONE_INSTANCE",
184
  "ARGS_ONE_NODE",
185
  "ARGS_ONE_OS",
186
  "ArgChoice",
187
  "ArgCommand",
188
  "ArgFile",
189
  "ArgHost",
190
  "ArgInstance",
191
  "ArgJobId",
192
  "ArgNode",
193
  "ArgOs",
194
  "ArgSuggest",
195
  "ArgUnknown",
196
  "OPT_COMPL_INST_ADD_NODES",
197
  "OPT_COMPL_MANY_NODES",
198
  "OPT_COMPL_ONE_IALLOCATOR",
199
  "OPT_COMPL_ONE_INSTANCE",
200
  "OPT_COMPL_ONE_NODE",
201
  "OPT_COMPL_ONE_NODEGROUP",
202
  "OPT_COMPL_ONE_OS",
203
  "cli_option",
204
  "SplitNodeOption",
205
  "CalculateOSNames",
206
  "ParseFields",
207
  ]
208

    
209
NO_PREFIX = "no_"
210
UN_PREFIX = "-"
211

    
212
#: Priorities (sorted)
213
_PRIORITY_NAMES = [
214
  ("low", constants.OP_PRIO_LOW),
215
  ("normal", constants.OP_PRIO_NORMAL),
216
  ("high", constants.OP_PRIO_HIGH),
217
  ]
218

    
219
#: Priority dictionary for easier lookup
220
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
221
# we migrate to Python 2.6
222
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
223

    
224

    
225
class _Argument:
226
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
227
    self.min = min
228
    self.max = max
229

    
230
  def __repr__(self):
231
    return ("<%s min=%s max=%s>" %
232
            (self.__class__.__name__, self.min, self.max))
233

    
234

    
235
class ArgSuggest(_Argument):
236
  """Suggesting argument.
237

238
  Value can be any of the ones passed to the constructor.
239

240
  """
241
  # pylint: disable-msg=W0622
242
  def __init__(self, min=0, max=None, choices=None):
243
    _Argument.__init__(self, min=min, max=max)
244
    self.choices = choices
245

    
246
  def __repr__(self):
247
    return ("<%s min=%s max=%s choices=%r>" %
248
            (self.__class__.__name__, self.min, self.max, self.choices))
249

    
250

    
251
class ArgChoice(ArgSuggest):
252
  """Choice argument.
253

254
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
255
  but value must be one of the choices.
256

257
  """
258

    
259

    
260
class ArgUnknown(_Argument):
261
  """Unknown argument to program (e.g. determined at runtime).
262

263
  """
264

    
265

    
266
class ArgInstance(_Argument):
267
  """Instances argument.
268

269
  """
270

    
271

    
272
class ArgNode(_Argument):
273
  """Node argument.
274

275
  """
276

    
277
class ArgJobId(_Argument):
278
  """Job ID argument.
279

280
  """
281

    
282

    
283
class ArgFile(_Argument):
284
  """File path argument.
285

286
  """
287

    
288

    
289
class ArgCommand(_Argument):
290
  """Command argument.
291

292
  """
293

    
294

    
295
class ArgHost(_Argument):
296
  """Host argument.
297

298
  """
299

    
300

    
301
class ArgOs(_Argument):
302
  """OS argument.
303

304
  """
305

    
306

    
307
ARGS_NONE = []
308
ARGS_MANY_INSTANCES = [ArgInstance()]
309
ARGS_MANY_NODES = [ArgNode()]
310
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
311
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
312
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
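# For illustration only: a command taking one or more instance names would
# declare its positional arguments as [ArgInstance(min=1)], i.e. at least one
# value and no upper bound.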
313

    
314

    
315
def _ExtractTagsObject(opts, args):
316
  """Extract the tag type object.
317

318
  Note that this function will modify its args parameter.
319

320
  """
321
  if not hasattr(opts, "tag_type"):
322
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
323
  kind = opts.tag_type
324
  if kind == constants.TAG_CLUSTER:
325
    retval = kind, kind
326
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
327
    if not args:
328
      raise errors.OpPrereqError("no arguments passed to the command")
329
    name = args.pop(0)
330
    retval = kind, name
331
  else:
332
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
333
  return retval
334

    
335

    
336
def _ExtendTags(opts, args):
337
  """Extend the args if a source file has been given.
338

339
  This function will extend the tags with the contents of the file
340
  passed in the 'tags_source' attribute of the opts parameter. A file
341
  named '-' will be replaced by stdin.
342

343
  """
344
  fname = opts.tags_source
345
  if fname is None:
346
    return
347
  if fname == "-":
348
    new_fh = sys.stdin
349
  else:
350
    new_fh = open(fname, "r")
351
  new_data = []
352
  try:
353
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
354
    # because of python bug 1633941
355
    while True:
356
      line = new_fh.readline()
357
      if not line:
358
        break
359
      new_data.append(line.strip())
360
  finally:
361
    new_fh.close()
362
  args.extend(new_data)
363

    
364

    
365
def ListTags(opts, args):
366
  """List the tags on a given object.
367

368
  This is a generic implementation that knows how to deal with all
369
  three cases of tag objects (cluster, node, instance). The opts
370
  argument is expected to contain a tag_type field denoting what
371
  object type we work on.
372

373
  """
374
  kind, name = _ExtractTagsObject(opts, args)
375
  cl = GetClient()
376
  result = cl.QueryTags(kind, name)
377
  result = list(result)
378
  result.sort()
379
  for tag in result:
380
    ToStdout(tag)
381

    
382

    
383
def AddTags(opts, args):
384
  """Add tags on a given object.
385

386
  This is a generic implementation that knows how to deal with all
387
  three cases of tag objects (cluster, node, instance). The opts
388
  argument is expected to contain a tag_type field denoting what
389
  object type we work on.
390

391
  """
392
  kind, name = _ExtractTagsObject(opts, args)
393
  _ExtendTags(opts, args)
394
  if not args:
395
    raise errors.OpPrereqError("No tags to be added")
396
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
397
  SubmitOpCode(op, opts=opts)
398

    
399

    
400
def RemoveTags(opts, args):
401
  """Remove tags from a given object.
402

403
  This is a generic implementation that knows how to deal with all
404
  three cases of tag objects (cluster, node, instance). The opts
405
  argument is expected to contain a tag_type field denoting what
406
  object type we work on.
407

408
  """
409
  kind, name = _ExtractTagsObject(opts, args)
410
  _ExtendTags(opts, args)
411
  if not args:
412
    raise errors.OpPrereqError("No tags to be removed")
413
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
414
  SubmitOpCode(op, opts=opts)
415

    
416

    
417
def check_unit(option, opt, value): # pylint: disable-msg=W0613
418
  """OptParsers custom converter for units.
419

420
  """
421
  try:
422
    return utils.ParseUnit(value)
423
  except errors.UnitParseError, err:
424
    raise OptionValueError("option %s: %s" % (opt, err))
425

    
426

    
427
def _SplitKeyVal(opt, data):
428
  """Convert a KeyVal string into a dict.
429

430
  This function will convert a key=val[,...] string into a dict. Empty
431
  values will be converted specially: keys which have the prefix 'no_'
432
  will have the value=False and the prefix stripped, the others will
433
  have value=True.
434

435
  @type opt: string
436
  @param opt: a string holding the option name for which we process the
437
      data, used in building error messages
438
  @type data: string
439
  @param data: a string of the format key=val,key=val,...
440
  @rtype: dict
441
  @return: {key=val, key=val}
442
  @raises errors.ParameterError: if there are duplicate keys
443

444
  """
445
  kv_dict = {}
446
  if data:
447
    for elem in utils.UnescapeAndSplit(data, sep=","):
448
      if "=" in elem:
449
        key, val = elem.split("=", 1)
450
      else:
451
        if elem.startswith(NO_PREFIX):
452
          key, val = elem[len(NO_PREFIX):], False
453
        elif elem.startswith(UN_PREFIX):
454
          key, val = elem[len(UN_PREFIX):], None
455
        else:
456
          key, val = elem, True
457
      if key in kv_dict:
458
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
459
                                    (key, opt))
460
      kv_dict[key] = val
461
  return kv_dict
462

    
463

    
464
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
465
  """Custom parser for ident:key=val,key=val options.
466

467
  This will store the parsed values as a tuple (ident, {key: val}). As such,
468
  multiple uses of this option via action=append is possible.
469

470
  """
471
  if ":" not in value:
472
    ident, rest = value, ''
473
  else:
474
    ident, rest = value.split(":", 1)
475

    
476
  if ident.startswith(NO_PREFIX):
477
    if rest:
478
      msg = "Cannot pass options when removing parameter groups: %s" % value
479
      raise errors.ParameterError(msg)
480
    retval = (ident[len(NO_PREFIX):], False)
481
  elif ident.startswith(UN_PREFIX):
482
    if rest:
483
      msg = "Cannot pass options when removing parameter groups: %s" % value
484
      raise errors.ParameterError(msg)
485
    retval = (ident[len(UN_PREFIX):], None)
486
  else:
487
    kv_dict = _SplitKeyVal(opt, rest)
488
    retval = (ident, kv_dict)
489
  return retval
490

    
491

    
492
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
493
  """Custom parser class for key=val,key=val options.
494

495
  This will store the parsed values as a dict {key: val}.
496

497
  """
498
  return _SplitKeyVal(opt, value)
499

    
500

    
501
def check_bool(option, opt, value): # pylint: disable-msg=W0613
502
  """Custom parser for yes/no options.
503

504
  This will store the parsed value as either True or False.
505

506
  """
507
  value = value.lower()
508
  if value == constants.VALUE_FALSE or value == "no":
509
    return False
510
  elif value == constants.VALUE_TRUE or value == "yes":
511
    return True
512
  else:
513
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
514

    
515

    
516
# completion_suggest is normally a list. We use numeric values that do not
517
# evaluate to False so they can be detected and used for dynamic completion.
518
(OPT_COMPL_MANY_NODES,
519
 OPT_COMPL_ONE_NODE,
520
 OPT_COMPL_ONE_INSTANCE,
521
 OPT_COMPL_ONE_OS,
522
 OPT_COMPL_ONE_IALLOCATOR,
523
 OPT_COMPL_INST_ADD_NODES,
524
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
525

    
526
OPT_COMPL_ALL = frozenset([
527
  OPT_COMPL_MANY_NODES,
528
  OPT_COMPL_ONE_NODE,
529
  OPT_COMPL_ONE_INSTANCE,
530
  OPT_COMPL_ONE_OS,
531
  OPT_COMPL_ONE_IALLOCATOR,
532
  OPT_COMPL_INST_ADD_NODES,
533
  OPT_COMPL_ONE_NODEGROUP,
534
  ])
535

    
536

    
537
class CliOption(Option):
538
  """Custom option class for optparse.
539

540
  """
541
  ATTRS = Option.ATTRS + [
542
    "completion_suggest",
543
    ]
544
  TYPES = Option.TYPES + (
545
    "identkeyval",
546
    "keyval",
547
    "unit",
548
    "bool",
549
    )
550
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
551
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
552
  TYPE_CHECKER["keyval"] = check_key_val
553
  TYPE_CHECKER["unit"] = check_unit
554
  TYPE_CHECKER["bool"] = check_bool
555

    
556

    
557
# optparse.py sets make_option, so we do it for our own option class, too
558
cli_option = CliOption
559

    
560

    
561
_YORNO = "yes|no"
562

    
563
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
564
                       help="Increase debugging level")
565

    
566
NOHDR_OPT = cli_option("--no-headers", default=False,
567
                       action="store_true", dest="no_headers",
568
                       help="Don't display column headers")
569

    
570
SEP_OPT = cli_option("--separator", default=None,
571
                     action="store", dest="separator",
572
                     help=("Separator between output fields"
573
                           " (defaults to one space)"))
574

    
575
USEUNITS_OPT = cli_option("--units", default=None,
576
                          dest="units", choices=('h', 'm', 'g', 't'),
577
                          help="Specify units for output (one of hmgt)")
578

    
579
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
580
                        type="string", metavar="FIELDS",
581
                        help="Comma separated list of output fields")
582

    
583
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
584
                       default=False, help="Force the operation")
585

    
586
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
587
                         default=False, help="Do not require confirmation")
588

    
589
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
590
                                  action="store_true", default=False,
591
                                  help=("Ignore offline nodes and do as much"
592
                                        " as possible"))
593

    
594
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
595
                         default=None, help="File with tag names")
596

    
597
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
598
                        default=False, action="store_true",
599
                        help=("Submit the job and return the job ID, but"
600
                              " don't wait for the job to finish"))
601

    
602
SYNC_OPT = cli_option("--sync", dest="do_locking",
603
                      default=False, action="store_true",
604
                      help=("Grab locks while doing the queries"
605
                            " in order to ensure more consistent results"))
606

    
607
DRY_RUN_OPT = cli_option("--dry-run", default=False,
608
                         action="store_true",
609
                         help=("Do not execute the operation, just run the"
610
                               " check steps and verify it it could be"
611
                               " executed"))
612

    
613
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
614
                         action="store_true",
615
                         help="Increase the verbosity of the operation")
616

    
617
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
618
                              action="store_true", dest="simulate_errors",
619
                              help="Debugging option that makes the operation"
620
                              " treat most runtime checks as failed")
621

    
622
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
623
                        default=True, action="store_false",
624
                        help="Don't wait for sync (DANGEROUS!)")
625

    
626
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
627
                               help="Custom disk setup (diskless, file,"
628
                               " plain or drbd)",
629
                               default=None, metavar="TEMPL",
630
                               choices=list(constants.DISK_TEMPLATES))
631

    
632
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
633
                        help="Do not create any network cards for"
634
                        " the instance")
635

    
636
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
637
                               help="Relative path under default cluster-wide"
638
                               " file storage dir to store file-based disks",
639
                               default=None, metavar="<DIR>")
640

    
641
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
642
                                  help="Driver to use for image files",
643
                                  default="loop", metavar="<DRIVER>",
644
                                  choices=list(constants.FILE_DRIVER))
645

    
646
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
647
                            help="Select nodes for the instance automatically"
648
                            " using the <NAME> iallocator plugin",
649
                            default=None, type="string",
650
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
651

    
652
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
653
                            metavar="<NAME>",
654
                            help="Set the default instance allocator plugin",
655
                            default=None, type="string",
656
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
657

    
658
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
659
                    metavar="<os>",
660
                    completion_suggest=OPT_COMPL_ONE_OS)
661

    
662
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
663
                         type="keyval", default={},
664
                         help="OS parameters")
665

    
666
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
667
                               action="store_true", default=False,
668
                               help="Force an unknown variant")
669

    
670
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
671
                            action="store_true", default=False,
672
                            help="Do not install the OS (will"
673
                            " enable no-start)")
674

    
675
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
676
                         type="keyval", default={},
677
                         help="Backend parameters")
678

    
679
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
680
                        default={}, dest="hvparams",
681
                        help="Hypervisor parameters")
682

    
683
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
684
                            help="Hypervisor and hypervisor options, in the"
685
                            " format hypervisor:option=value,option=value,...",
686
                            default=None, type="identkeyval")
687

    
688
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
689
                        help="Hypervisor and hypervisor options, in the"
690
                        " format hypervisor:option=value,option=value,...",
691
                        default=[], action="append", type="identkeyval")
692

    
693
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
694
                           action="store_false",
695
                           help="Don't check that the instance's IP"
696
                           " is alive")
697

    
698
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
699
                             default=True, action="store_false",
700
                             help="Don't check that the instance's name"
701
                             " is resolvable")
702

    
703
NET_OPT = cli_option("--net",
704
                     help="NIC parameters", default=[],
705
                     dest="nics", action="append", type="identkeyval")
706

    
707
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
708
                      dest="disks", action="append", type="identkeyval")
709

    
710
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
711
                         help="Comma-separated list of disks"
712
                         " indices to act on (e.g. 0,2) (optional,"
713
                         " defaults to all disks)")
714

    
715
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
716
                         help="Enforces a single-disk configuration using the"
717
                         " given disk size, in MiB unless a suffix is used",
718
                         default=None, type="unit", metavar="<size>")
719

    
720
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
721
                                dest="ignore_consistency",
722
                                action="store_true", default=False,
723
                                help="Ignore the consistency of the disks on"
724
                                " the secondary")
725

    
726
NONLIVE_OPT = cli_option("--non-live", dest="live",
727
                         default=True, action="store_false",
728
                         help="Do a non-live migration (this usually means"
729
                         " freeze the instance, save the state, transfer and"
730
                         " only then resume running on the secondary node)")
731

    
732
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
733
                                default=None,
734
                                choices=list(constants.HT_MIGRATION_MODES),
735
                                help="Override default migration mode (choose"
736
                                " either live or non-live")
737

    
738
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
739
                                help="Target node and optional secondary node",
740
                                metavar="<pnode>[:<snode>]",
741
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
742

    
743
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
744
                           action="append", metavar="<node>",
745
                           help="Use only this node (can be used multiple"
746
                           " times, if not given defaults to all nodes)",
747
                           completion_suggest=OPT_COMPL_ONE_NODE)
748

    
749
NODEGROUP_OPT = cli_option("-g", "--node-group",
750
                           dest="nodegroup",
751
                           help="Node group (name or uuid)",
752
                           metavar="<nodegroup>",
753
                           default=None, type="string",
754
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
755

    
756
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
757
                             metavar="<node>",
758
                             completion_suggest=OPT_COMPL_ONE_NODE)
759

    
760
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
761
                         action="store_false",
762
                         help="Don't start the instance after creation")
763

    
764
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
765
                         action="store_true", default=False,
766
                         help="Show command instead of executing it")
767

    
768
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
769
                         default=False, action="store_true",
770
                         help="Instead of performing the migration, try to"
771
                         " recover from a failed cleanup. This is safe"
772
                         " to run even if the instance is healthy, but it"
773
                         " will create extra replication traffic and "
774
                         " disrupt briefly the replication (like during the"
775
                         " migration")
776

    
777
STATIC_OPT = cli_option("-s", "--static", dest="static",
778
                        action="store_true", default=False,
779
                        help="Only show configuration data, not runtime data")
780

    
781
ALL_OPT = cli_option("--all", dest="show_all",
782
                     default=False, action="store_true",
783
                     help="Show info on all instances on the cluster."
784
                     " This can take a long time to run, use wisely")
785

    
786
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
787
                           action="store_true", default=False,
788
                           help="Interactive OS reinstall, lists available"
789
                           " OS templates for selection")
790

    
791
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
792
                                 action="store_true", default=False,
793
                                 help="Remove the instance from the cluster"
794
                                 " configuration even if there are failures"
795
                                 " during the removal process")
796

    
797
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
798
                                        dest="ignore_remove_failures",
799
                                        action="store_true", default=False,
800
                                        help="Remove the instance from the"
801
                                        " cluster configuration even if there"
802
                                        " are failures during the removal"
803
                                        " process")
804

    
805
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
806
                                 action="store_true", default=False,
807
                                 help="Remove the instance from the cluster")
808

    
809
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
810
                               help="Specifies the new secondary node",
811
                               metavar="NODE", default=None,
812
                               completion_suggest=OPT_COMPL_ONE_NODE)
813

    
814
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
815
                            default=False, action="store_true",
816
                            help="Replace the disk(s) on the primary"
817
                            " node (only for the drbd template)")
818

    
819
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
820
                              default=False, action="store_true",
821
                              help="Replace the disk(s) on the secondary"
822
                              " node (only for the drbd template)")
823

    
824
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
825
                              default=False, action="store_true",
826
                              help="Lock all nodes and auto-promote as needed"
827
                              " to MC status")
828

    
829
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
830
                              default=False, action="store_true",
831
                              help="Automatically replace faulty disks"
832
                              " (only for the drbd template)")
833

    
834
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
835
                             default=False, action="store_true",
836
                             help="Ignore current recorded size"
837
                             " (useful for forcing activation when"
838
                             " the recorded size is wrong)")
839

    
840
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
841
                          metavar="<node>",
842
                          completion_suggest=OPT_COMPL_ONE_NODE)
843

    
844
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
845
                         metavar="<dir>")
846

    
847
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
848
                              help="Specify the secondary ip for the node",
849
                              metavar="ADDRESS", default=None)
850

    
851
READD_OPT = cli_option("--readd", dest="readd",
852
                       default=False, action="store_true",
853
                       help="Readd old node after replacing it")
854

    
855
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
856
                                default=True, action="store_false",
857
                                help="Disable SSH key fingerprint checking")
858

    
859

    
860
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
861
                    type="bool", default=None, metavar=_YORNO,
862
                    help="Set the master_candidate flag on the node")
863

    
864
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
865
                         type="bool", default=None,
866
                         help="Set the offline flag on the node")
867

    
868
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
869
                         type="bool", default=None,
870
                         help="Set the drained flag on the node")
871

    
872
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
873
                             type="bool", default=None, metavar=_YORNO,
874
                             help="Set the allocatable flag on a volume")
875

    
876
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
877
                               help="Disable support for lvm based instances"
878
                               " (cluster-wide)",
879
                               action="store_false", default=True)
880

    
881
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
882
                            dest="enabled_hypervisors",
883
                            help="Comma-separated list of hypervisors",
884
                            type="string", default=None)
885

    
886
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
887
                            type="keyval", default={},
888
                            help="NIC parameters")
889

    
890
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
891
                         dest="candidate_pool_size", type="int",
892
                         help="Set the candidate pool size")
893

    
894
VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
895
                         help="Enables LVM and specifies the volume group"
896
                         " name (cluster-wide) for disk allocation [xenvg]",
897
                         metavar="VG", default=None)
898

    
899
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
900
                          help="Destroy cluster", action="store_true")
901

    
902
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
903
                          help="Skip node agreement check (dangerous)",
904
                          action="store_true", default=False)
905

    
906
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
907
                            help="Specify the mac prefix for the instance IP"
908
                            " addresses, in the format XX:XX:XX",
909
                            metavar="PREFIX",
910
                            default=None)
911

    
912
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
913
                               help="Specify the node interface (cluster-wide)"
914
                               " on which the master IP address will be added "
915
                               " [%s]" % constants.DEFAULT_BRIDGE,
916
                               metavar="NETDEV",
917
                               default=constants.DEFAULT_BRIDGE)
918

    
919
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
920
                                help="Specify the default directory (cluster-"
921
                                "wide) for storing the file-based disks [%s]" %
922
                                constants.DEFAULT_FILE_STORAGE_DIR,
923
                                metavar="DIR",
924
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
925

    
926
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
927
                                   help="Don't modify /etc/hosts",
928
                                   action="store_false", default=True)
929

    
930
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
931
                                    help="Don't initialize SSH keys",
932
                                    action="store_false", default=True)
933

    
934
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
935
                             help="Enable parseable error messages",
936
                             action="store_true", default=False)
937

    
938
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
939
                          help="Skip N+1 memory redundancy tests",
940
                          action="store_true", default=False)
941

    
942
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
943
                             help="Type of reboot: soft/hard/full",
944
                             default=constants.INSTANCE_REBOOT_HARD,
945
                             metavar="<REBOOT>",
946
                             choices=list(constants.REBOOT_TYPES))
947

    
948
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
949
                                    dest="ignore_secondaries",
950
                                    default=False, action="store_true",
951
                                    help="Ignore errors from secondaries")
952

    
953
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
954
                            action="store_false", default=True,
955
                            help="Don't shutdown the instance (unsafe)")
956

    
957
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
958
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
959
                         help="Maximum time to wait")
960

    
961
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
962
                         dest="shutdown_timeout", type="int",
963
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
964
                         help="Maximum time to wait for instance shutdown")
965

    
966
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
967
                          default=None,
968
                          help=("Number of seconds between repetions of the"
969
                                " command"))
970

    
971
EARLY_RELEASE_OPT = cli_option("--early-release",
972
                               dest="early_release", default=False,
973
                               action="store_true",
974
                               help="Release the locks on the secondary"
975
                               " node(s) early")
976

    
977
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
978
                                  dest="new_cluster_cert",
979
                                  default=False, action="store_true",
980
                                  help="Generate a new cluster certificate")
981

    
982
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
983
                           default=None,
984
                           help="File containing new RAPI certificate")
985

    
986
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
987
                               default=None, action="store_true",
988
                               help=("Generate a new self-signed RAPI"
989
                                     " certificate"))
990

    
991
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
992
                                    dest="new_confd_hmac_key",
993
                                    default=False, action="store_true",
994
                                    help=("Create a new HMAC key for %s" %
995
                                          constants.CONFD))
996

    
997
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
998
                                       dest="cluster_domain_secret",
999
                                       default=None,
1000
                                       help=("Load new new cluster domain"
1001
                                             " secret from file"))
1002

    
1003
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1004
                                           dest="new_cluster_domain_secret",
1005
                                           default=False, action="store_true",
1006
                                           help=("Create a new cluster domain"
1007
                                                 " secret"))
1008

    
1009
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1010
                              dest="use_replication_network",
1011
                              help="Whether to use the replication network"
1012
                              " for talking to the nodes",
1013
                              action="store_true", default=False)
1014

    
1015
MAINTAIN_NODE_HEALTH_OPT = \
1016
    cli_option("--maintain-node-health", dest="maintain_node_health",
1017
               metavar=_YORNO, default=None, type="bool",
1018
               help="Configure the cluster to automatically maintain node"
1019
               " health, by shutting down unknown instances, shutting down"
1020
               " unknown DRBD devices, etc.")
1021

    
1022
IDENTIFY_DEFAULTS_OPT = \
1023
    cli_option("--identify-defaults", dest="identify_defaults",
1024
               default=False, action="store_true",
1025
               help="Identify which saved instance parameters are equal to"
1026
               " the current cluster defaults and set them as such, instead"
1027
               " of marking them as overridden")
1028

    
1029
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1030
                         action="store", dest="uid_pool",
1031
                         help=("A list of user-ids or user-id"
1032
                               " ranges separated by commas"))
1033

    
1034
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1035
                          action="store", dest="add_uids",
1036
                          help=("A list of user-ids or user-id"
1037
                                " ranges separated by commas, to be"
1038
                                " added to the user-id pool"))
1039

    
1040
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1041
                             action="store", dest="remove_uids",
1042
                             help=("A list of user-ids or user-id"
1043
                                   " ranges separated by commas, to be"
1044
                                   " removed from the user-id pool"))
1045

    
1046
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1047
                             action="store", dest="reserved_lvs",
1048
                             help=("A comma-separated list of reserved"
1049
                                   " logical volumes names, that will be"
1050
                                   " ignored by cluster verify"))
1051

    
1052
ROMAN_OPT = cli_option("--roman",
1053
                       dest="roman_integers", default=False,
1054
                       action="store_true",
1055
                       help="Use roman numbers for positive integers")
1056

    
1057
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1058
                             action="store", default=None,
1059
                             help="Specifies usermode helper for DRBD")
1060

    
1061
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1062
                                action="store_false", default=True,
1063
                                help="Disable support for DRBD")
1064

    
1065
PRIMARY_IP_VERSION_OPT = \
1066
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1067
               action="store", dest="primary_ip_version",
1068
               metavar="%d|%d" % (constants.IP4_VERSION,
1069
                                  constants.IP6_VERSION),
1070
               help="Cluster-wide IP version for primary IP")
1071

    
1072
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1073
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1074
                          choices=_PRIONAME_TO_VALUE.keys(),
1075
                          help="Priority for opcode processing")
1076

    
1077
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1078
                        type="bool", default=None, metavar=_YORNO,
1079
                        help="Sets the hidden flag on the OS")
1080

    
1081
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1082
                        type="bool", default=None, metavar=_YORNO,
1083
                        help="Sets the blacklisted flag on the OS")
1084

    
1085

    
1086
#: Options provided by all commands
1087
COMMON_OPTS = [DEBUG_OPT]
1088

    
1089

    
1090
def _ParseArgs(argv, commands, aliases):
1091
  """Parser for the command line arguments.
1092

1093
  This function parses the arguments and returns the function which
1094
  must be executed together with its (modified) arguments.
1095

1096
  @param argv: the command line
1097
  @param commands: dictionary with special contents, see the design
1098
      doc for cmdline handling
1099
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
1100

1101
  """
1102
  if len(argv) == 0:
1103
    binary = "<command>"
1104
  else:
1105
    binary = argv[0].split("/")[-1]
1106

    
1107
  if len(argv) > 1 and argv[1] == "--version":
1108
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1109
             constants.RELEASE_VERSION)
1110
    # Quit right away. That way we don't have to care about this special
1111
    # argument. optparse.py does it the same.
1112
    sys.exit(0)
1113

    
1114
  if len(argv) < 2 or not (argv[1] in commands or
1115
                           argv[1] in aliases):
1116
    # let's do a nice thing
1117
    sortedcmds = commands.keys()
1118
    sortedcmds.sort()
1119

    
1120
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1121
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1122
    ToStdout("")
1123

    
1124
    # compute the max line length for cmd + usage
1125
    mlen = max([len(" %s" % cmd) for cmd in commands])
1126
    mlen = min(60, mlen) # should not get here...
1127

    
1128
    # and format a nice command list
1129
    ToStdout("Commands:")
1130
    for cmd in sortedcmds:
1131
      cmdstr = " %s" % (cmd,)
1132
      help_text = commands[cmd][4]
1133
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1134
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1135
      for line in help_lines:
1136
        ToStdout("%-*s   %s", mlen, "", line)
1137

    
1138
    ToStdout("")
1139

    
1140
    return None, None, None
1141

    
1142
  # get command, unalias it, and look it up in commands
1143
  cmd = argv.pop(1)
1144
  if cmd in aliases:
1145
    if cmd in commands:
1146
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1147
                                   " command" % cmd)
1148

    
1149
    if aliases[cmd] not in commands:
1150
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1151
                                   " command '%s'" % (cmd, aliases[cmd]))
1152

    
1153
    cmd = aliases[cmd]
1154

    
1155
  func, args_def, parser_opts, usage, description = commands[cmd]
1156
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1157
                        description=description,
1158
                        formatter=TitledHelpFormatter(),
1159
                        usage="%%prog %s %s" % (cmd, usage))
1160
  parser.disable_interspersed_args()
1161
  options, args = parser.parse_args()
1162

    
1163
  if not _CheckArguments(cmd, args_def, args):
1164
    return None, None, None
1165

    
1166
  return func, options, args
1167

    
1168

    
1169
def _CheckArguments(cmd, args_def, args):
1170
  """Verifies the arguments using the argument definition.
1171

1172
  Algorithm:
1173

1174
    1. Abort with error if values specified by user but none expected.
1175

1176
    1. For each argument in definition
1177

1178
      1. Keep running count of minimum number of values (min_count)
1179
      1. Keep running count of maximum number of values (max_count)
1180
      1. If it has an unlimited number of values
1181

1182
        1. Abort with error if it's not the last argument in the definition
1183

1184
    1. If last argument has limited number of values
1185

1186
      1. Abort with error if number of values doesn't match or is too large
1187

1188
    1. Abort with error if user didn't pass enough values (min_count)
1189

1190
  """
1191
  if args and not args_def:
1192
    ToStderr("Error: Command %s expects no arguments", cmd)
1193
    return False
1194

    
1195
  min_count = None
1196
  max_count = None
1197
  check_max = None
1198

    
1199
  last_idx = len(args_def) - 1
1200

    
1201
  for idx, arg in enumerate(args_def):
1202
    if min_count is None:
1203
      min_count = arg.min
1204
    elif arg.min is not None:
1205
      min_count += arg.min
1206

    
1207
    if max_count is None:
1208
      max_count = arg.max
1209
    elif arg.max is not None:
1210
      max_count += arg.max
1211

    
1212
    if idx == last_idx:
1213
      check_max = (arg.max is not None)
1214

    
1215
    elif arg.max is None:
1216
      raise errors.ProgrammerError("Only the last argument can have max=None")
1217

    
1218
  if check_max:
1219
    # Command with exact number of arguments
1220
    if (min_count is not None and max_count is not None and
1221
        min_count == max_count and len(args) != min_count):
1222
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1223
      return False
1224

    
1225
    # Command with limited number of arguments
1226
    if max_count is not None and len(args) > max_count:
1227
      ToStderr("Error: Command %s expects only %d argument(s)",
1228
               cmd, max_count)
1229
      return False
1230

    
1231
  # Command with some required arguments
1232
  if min_count is not None and len(args) < min_count:
1233
    ToStderr("Error: Command %s expects at least %d argument(s)",
1234
             cmd, min_count)
1235
    return False
1236

    
1237
  return True
1238

    
1239

    
1240
def SplitNodeOption(value):
1241
  """Splits the value of a --node option.
1242

1243
  """
1244
  if value and ':' in value:
1245
    return value.split(':', 1)
1246
  else:
1247
    return (value, None)
1248

    
1249

    
1250
def CalculateOSNames(os_name, os_variants):
1251
  """Calculates all the names an OS can be called, according to its variants.
1252

1253
  @type os_name: string
1254
  @param os_name: base name of the os
1255
  @type os_variants: list or None
1256
  @param os_variants: list of supported variants
1257
  @rtype: list
1258
  @return: list of valid names
1259

1260
  """
1261
  if os_variants:
1262
    return ['%s+%s' % (os_name, v) for v in os_variants]
1263
  else:
1264
    return [os_name]
1265

    
1266

    
1267
def ParseFields(selected, default):
1268
  """Parses the values of "--field"-like options.
1269

1270
  @type selected: string or None
1271
  @param selected: User-selected options
1272
  @type default: list
1273
  @param default: Default fields
1274

1275
  """
1276
  if selected is None:
1277
    return default
1278

    
1279
  if selected.startswith("+"):
1280
    return default + selected[1:].split(",")
1281

    
1282
  return selected.split(",")
1283

    
1284

    
1285
UsesRPC = rpc.RunWithRPC
1286

    
1287

    
1288
def AskUser(text, choices=None):
1289
  """Ask the user a question.
1290

1291
  @param text: the question to ask
1292

1293
  @param choices: list with elements tuples (input_char, return_value,
1294
      description); if not given, it will default to: [('y', True,
1295
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1296
      note that the '?' char is reserved for help
1297

1298
  @return: one of the return values from the choices list; if input is
1299
      not possible (i.e. not running with a tty, we return the last
1300
      entry from the list
1301

1302
  """
1303
  if choices is None:
1304
    choices = [('y', True, 'Perform the operation'),
1305
               ('n', False, 'Do not perform the operation')]
1306
  if not choices or not isinstance(choices, list):
1307
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1308
  for entry in choices:
1309
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1310
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1311

    
1312
  answer = choices[-1][1]
1313
  new_text = []
1314
  for line in text.splitlines():
1315
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1316
  text = "\n".join(new_text)
1317
  try:
1318
    f = file("/dev/tty", "a+")
1319
  except IOError:
1320
    return answer
1321
  try:
1322
    chars = [entry[0] for entry in choices]
1323
    chars[-1] = "[%s]" % chars[-1]
1324
    chars.append('?')
1325
    maps = dict([(entry[0], entry[1]) for entry in choices])
1326
    while True:
1327
      f.write(text)
1328
      f.write('\n')
1329
      f.write("/".join(chars))
1330
      f.write(": ")
1331
      line = f.readline(2).strip().lower()
1332
      if line in maps:
1333
        answer = maps[line]
1334
        break
1335
      elif line == '?':
1336
        for entry in choices:
1337
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1338
        f.write("\n")
1339
        continue
1340
  finally:
1341
    f.close()
1342
  return answer
1343

    
1344

    
1345
class JobSubmittedException(Exception):
1346
  """Job was submitted, client should exit.
1347

1348
  This exception has one argument, the ID of the job that was
1349
  submitted. The handler should print this ID.
1350

1351
  This is not an error, just a structured way to exit from clients.
1352

1353
  """
1354

    
1355

    
1356
def SendJob(ops, cl=None):
1357
  """Function to submit an opcode without waiting for the results.
1358

1359
  @type ops: list
1360
  @param ops: list of opcodes
1361
  @type cl: luxi.Client
1362
  @param cl: the luxi client to use for communicating with the master;
1363
             if None, a new client will be created
1364

1365
  """
1366
  if cl is None:
1367
    cl = GetClient()
1368

    
1369
  job_id = cl.SubmitJob(ops)
1370

    
1371
  return job_id
1372

    
1373

    
1374
def GenericPollJob(job_id, cbs, report_cbs):
1375
  """Generic job-polling function.
1376

1377
  @type job_id: number
1378
  @param job_id: Job ID
1379
  @type cbs: Instance of L{JobPollCbBase}
1380
  @param cbs: Data callbacks
1381
  @type report_cbs: Instance of L{JobPollReportCbBase}
1382
  @param report_cbs: Reporting callbacks
1383

1384
  """
1385
  prev_job_info = None
1386
  prev_logmsg_serial = None
1387

    
1388
  status = None
1389

    
1390
  while True:
1391
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1392
                                      prev_logmsg_serial)
1393
    if not result:
1394
      # job not found, go away!
1395
      raise errors.JobLost("Job with id %s lost" % job_id)
1396

    
1397
    if result == constants.JOB_NOTCHANGED:
1398
      report_cbs.ReportNotChanged(job_id, status)
1399

    
1400
      # Wait again
1401
      continue
1402

    
1403
    # Split result, a tuple of (field values, log entries)
1404
    (job_info, log_entries) = result
1405
    (status, ) = job_info
1406

    
1407
    if log_entries:
1408
      for log_entry in log_entries:
1409
        (serial, timestamp, log_type, message) = log_entry
1410
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1411
                                    log_type, message)
1412
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1413

    
1414
    # TODO: Handle canceled and archived jobs
1415
    elif status in (constants.JOB_STATUS_SUCCESS,
1416
                    constants.JOB_STATUS_ERROR,
1417
                    constants.JOB_STATUS_CANCELING,
1418
                    constants.JOB_STATUS_CANCELED):
1419
      break
1420

    
1421
    prev_job_info = job_info
1422

    
1423
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1424
  if not jobs:
1425
    raise errors.JobLost("Job with id %s lost" % job_id)
1426

    
1427
  status, opstatus, result = jobs[0]
1428

    
1429
  if status == constants.JOB_STATUS_SUCCESS:
1430
    return result
1431

    
1432
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1433
    raise errors.OpExecError("Job was canceled")
1434

    
1435
  has_ok = False
1436
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1437
    if status == constants.OP_STATUS_SUCCESS:
1438
      has_ok = True
1439
    elif status == constants.OP_STATUS_ERROR:
1440
      errors.MaybeRaise(msg)
1441

    
1442
      if has_ok:
1443
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1444
                                 (idx, msg))
1445

    
1446
      raise errors.OpExecError(str(msg))
1447

    
1448
  # default failure mode
1449
  raise errors.OpExecError(result)
1450

    
1451

    
1452
class JobPollCbBase:
1453
  """Base class for L{GenericPollJob} callbacks.
1454

1455
  """
1456
  def __init__(self):
1457
    """Initializes this class.
1458

1459
    """
1460

    
1461
  def WaitForJobChangeOnce(self, job_id, fields,
1462
                           prev_job_info, prev_log_serial):
1463
    """Waits for changes on a job.
1464

1465
    """
1466
    raise NotImplementedError()
1467

    
1468
  def QueryJobs(self, job_ids, fields):
1469
    """Returns the selected fields for the selected job IDs.
1470

1471
    @type job_ids: list of numbers
1472
    @param job_ids: Job IDs
1473
    @type fields: list of strings
1474
    @param fields: Fields
1475

1476
    """
1477
    raise NotImplementedError()
1478

    
1479

    
1480
class JobPollReportCbBase:
1481
  """Base class for L{GenericPollJob} reporting callbacks.
1482

1483
  """
1484
  def __init__(self):
1485
    """Initializes this class.
1486

1487
    """
1488

    
1489
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1490
    """Handles a log message.
1491

1492
    """
1493
    raise NotImplementedError()
1494

    
1495
  def ReportNotChanged(self, job_id, status):
1496
    """Called for if a job hasn't changed in a while.
1497

1498
    @type job_id: number
1499
    @param job_id: Job ID
1500
    @type status: string or None
1501
    @param status: Job status if available
1502

1503
    """
1504
    raise NotImplementedError()
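# Illustrative usage sketch (not part of the original cli.py): a minimal
# reporting callback that collects log messages in memory instead of printing
# them; instances can be passed as the reporting callback of GenericPollJob()
# or as the reporter argument of PollJob().  The class name is made up.
class _ExampleCollectingReportCb(JobPollReportCbBase):
  def __init__(self):
    JobPollReportCbBase.__init__(self)
    self.messages = []

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    # Keep the raw message data for later inspection
    self.messages.append((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    # Nothing to report while the job is idle
    pass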
1505

    
1506

    
1507
class _LuxiJobPollCb(JobPollCbBase):
1508
  def __init__(self, cl):
1509
    """Initializes this class.
1510

1511
    """
1512
    JobPollCbBase.__init__(self)
1513
    self.cl = cl
1514

    
1515
  def WaitForJobChangeOnce(self, job_id, fields,
1516
                           prev_job_info, prev_log_serial):
1517
    """Waits for changes on a job.
1518

1519
    """
1520
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1521
                                        prev_job_info, prev_log_serial)
1522

    
1523
  def QueryJobs(self, job_ids, fields):
1524
    """Returns the selected fields for the selected job IDs.
1525

1526
    """
1527
    return self.cl.QueryJobs(job_ids, fields)
1528

    
1529

    
1530
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1531
  def __init__(self, feedback_fn):
1532
    """Initializes this class.
1533

1534
    """
1535
    JobPollReportCbBase.__init__(self)
1536

    
1537
    self.feedback_fn = feedback_fn
1538

    
1539
    assert callable(feedback_fn)
1540

    
1541
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1542
    """Handles a log message.
1543

1544
    """
1545
    self.feedback_fn((timestamp, log_type, log_msg))
1546

    
1547
  def ReportNotChanged(self, job_id, status):
1548
    """Called if a job hasn't changed in a while.
1549

1550
    """
1551
    # Ignore
1552

    
1553

    
1554
class StdioJobPollReportCb(JobPollReportCbBase):
1555
  def __init__(self):
1556
    """Initializes this class.
1557

1558
    """
1559
    JobPollReportCbBase.__init__(self)
1560

    
1561
    self.notified_queued = False
1562
    self.notified_waitlock = False
1563

    
1564
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1565
    """Handles a log message.
1566

1567
    """
1568
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1569
             FormatLogMessage(log_type, log_msg))
1570

    
1571
  def ReportNotChanged(self, job_id, status):
1572
    """Called if a job hasn't changed in a while.
1573

1574
    """
1575
    if status is None:
1576
      return
1577

    
1578
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1579
      ToStderr("Job %s is waiting in queue", job_id)
1580
      self.notified_queued = True
1581

    
1582
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1583
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1584
      self.notified_waitlock = True
1585

    
1586

    
1587
def FormatLogMessage(log_type, log_msg):
1588
  """Formats a job message according to its type.
1589

1590
  """
1591
  if log_type != constants.ELOG_MESSAGE:
1592
    log_msg = str(log_msg)
1593

    
1594
  return utils.SafeEncode(log_msg)
1595

    
1596

    
1597
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1598
  """Function to poll for the result of a job.
1599

1600
  @type job_id: job identifier
1601
  @param job_id: the job to poll for results
1602
  @type cl: luxi.Client
1603
  @param cl: the luxi client to use for communicating with the master;
1604
             if None, a new client will be created
  @type feedback_fn: callable or None
  @param feedback_fn: if not None, called with each log message of the job;
      mutually exclusive with the reporter argument
  @param reporter: if not None, a L{JobPollReportCbBase} instance used to
      report job progress; mutually exclusive with feedback_fn
1605

1606
  """
1607
  if cl is None:
1608
    cl = GetClient()
1609

    
1610
  if reporter is None:
1611
    if feedback_fn:
1612
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1613
    else:
1614
      reporter = StdioJobPollReportCb()
1615
  elif feedback_fn:
1616
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1617

    
1618
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
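# Illustrative usage sketch (not part of the original cli.py): polling an
# already submitted job while collecting its log messages through the
# feedback_fn interface (a bound list.append is a valid feedback function).
# The helper name is made up.
def _ExamplePollJob(job_id):
  messages = []
  result = PollJob(job_id, feedback_fn=messages.append)
  # 'messages' now holds (timestamp, log_type, log_msg) tuples
  return result, messages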
1619

    
1620

    
1621
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1622
  """Legacy function to submit an opcode.
1623

1624
  This is just a simple wrapper over the construction of the processor
1625
  instance. It should be extended to better handle feedback and
1626
  interaction functions.
1627

1628
  """
1629
  if cl is None:
1630
    cl = GetClient()
1631

    
1632
  SetGenericOpcodeOpts([op], opts)
1633

    
1634
  job_id = SendJob([op], cl=cl)
1635

    
1636
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1637
                       reporter=reporter)
1638

    
1639
  return op_results[0]
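# Illustrative usage sketch (not part of the original cli.py): SubmitOpCode()
# essentially sends a one-opcode job and then polls it; the same two steps can
# be done manually when several submissions should share one luxi client.
# Here 'op' is any already-built opcode instance and the helper name is made
# up.
def _ExampleManualSubmit(op):
  cl = GetClient()
  job_id = SendJob([op], cl=cl)
  # Return the result of the first (and only) opcode in the job
  return PollJob(job_id, cl=cl)[0]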
1640

    
1641

    
1642
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1643
  """Wrapper around SubmitOpCode or SendJob.
1644

1645
  This function will decide, based on the 'opts' parameter, whether to
1646
  submit and wait for the result of the opcode (and return it), or
1647
  whether to just send the job and print its identifier. It is used in
1648
  order to simplify the implementation of the '--submit' option.
1649

1650
  It will also process the opcodes if we're sending them via SendJob
1651
  (otherwise SubmitOpCode does it).
1652

1653
  """
1654
  if opts and opts.submit_only:
1655
    job = [op]
1656
    SetGenericOpcodeOpts(job, opts)
1657
    job_id = SendJob(job, cl=cl)
1658
    raise JobSubmittedException(job_id)
1659
  else:
1660
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
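# Illustrative usage sketch (not part of the original cli.py): the typical
# shape of a gnt-* command handler built around SubmitOrSend(); with --submit
# the call raises JobSubmittedException and GenericMain prints the job ID
# instead of waiting.  OpShutdownInstance is assumed to be the instance
# shutdown opcode of this Ganeti version (only OpCreateInstance appears in
# this file), and the handler name is made up.
def _ExampleShutdownCommand(opts, args):
  op = opcodes.OpShutdownInstance(instance_name=args[0])
  SubmitOrSend(op, opts)
  return 0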
1661

    
1662

    
1663
def SetGenericOpcodeOpts(opcode_list, options):
1664
  """Processor for generic options.
1665

1666
  This function updates the given opcodes based on generic command
1667
  line options (like debug, dry-run, etc.).
1668

1669
  @param opcode_list: list of opcodes
1670
  @param options: command line options or None
1671
  @return: None (in-place modification)
1672

1673
  """
1674
  if not options:
1675
    return
1676
  for op in opcode_list:
1677
    op.debug_level = options.debug
1678
    if hasattr(options, "dry_run"):
1679
      op.dry_run = options.dry_run
1680
    if getattr(options, "priority", None) is not None:
1681
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1682

    
1683

    
1684
def GetClient():
  """Returns a luxi client for communicating with the master daemon.

  """
1685
  # TODO: Cache object?
1686
  try:
1687
    client = luxi.Client()
1688
  except luxi.NoMasterError:
1689
    ss = ssconf.SimpleStore()
1690

    
1691
    # Try to read ssconf file
1692
    try:
1693
      ss.GetMasterNode()
1694
    except errors.ConfigurationError:
1695
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1696
                                 " not part of a cluster")
1697

    
1698
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1699
    if master != myself:
1700
      raise errors.OpPrereqError("This is not the master node, please connect"
1701
                                 " to node '%s' and rerun the command" %
1702
                                 master)
1703
    raise
1704
  return client
1705

    
1706

    
1707
def FormatError(err):
1708
  """Return a formatted error message for a given error.
1709

1710
  This function takes an exception instance and returns a tuple
1711
  consisting of two values: first, the recommended exit code, and
1712
  second, a string describing the error message (not
1713
  newline-terminated).
1714

1715
  """
1716
  retcode = 1
1717
  obuf = StringIO()
1718
  msg = str(err)
1719
  if isinstance(err, errors.ConfigurationError):
1720
    txt = "Corrupt configuration file: %s" % msg
1721
    logging.error(txt)
1722
    obuf.write(txt + "\n")
1723
    obuf.write("Aborting.")
1724
    retcode = 2
1725
  elif isinstance(err, errors.HooksAbort):
1726
    obuf.write("Failure: hooks execution failed:\n")
1727
    for node, script, out in err.args[0]:
1728
      if out:
1729
        obuf.write("  node: %s, script: %s, output: %s\n" %
1730
                   (node, script, out))
1731
      else:
1732
        obuf.write("  node: %s, script: %s (no output)\n" %
1733
                   (node, script))
1734
  elif isinstance(err, errors.HooksFailure):
1735
    obuf.write("Failure: hooks general failure: %s" % msg)
1736
  elif isinstance(err, errors.ResolverError):
1737
    this_host = netutils.Hostname.GetSysName()
1738
    if err.args[0] == this_host:
1739
      msg = "Failure: can't resolve my own hostname ('%s')"
1740
    else:
1741
      msg = "Failure: can't resolve hostname '%s'"
1742
    obuf.write(msg % err.args[0])
1743
  elif isinstance(err, errors.OpPrereqError):
1744
    if len(err.args) == 2:
1745
      obuf.write("Failure: prerequisites not met for this"
1746
               " operation:\nerror type: %s, error details:\n%s" %
1747
                 (err.args[1], err.args[0]))
1748
    else:
1749
      obuf.write("Failure: prerequisites not met for this"
1750
                 " operation:\n%s" % msg)
1751
  elif isinstance(err, errors.OpExecError):
1752
    obuf.write("Failure: command execution error:\n%s" % msg)
1753
  elif isinstance(err, errors.TagError):
1754
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1755
  elif isinstance(err, errors.JobQueueDrainError):
1756
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1757
               " accept new requests\n")
1758
  elif isinstance(err, errors.JobQueueFull):
1759
    obuf.write("Failure: the job queue is full and doesn't accept new"
1760
               " job submissions until old jobs are archived\n")
1761
  elif isinstance(err, errors.TypeEnforcementError):
1762
    obuf.write("Parameter Error: %s" % msg)
1763
  elif isinstance(err, errors.ParameterError):
1764
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1765
  elif isinstance(err, luxi.NoMasterError):
1766
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1767
               " and listening for connections?")
1768
  elif isinstance(err, luxi.TimeoutError):
1769
    obuf.write("Timeout while talking to the master daemon. Error:\n"
1770
               "%s" % msg)
1771
  elif isinstance(err, luxi.PermissionError):
1772
    obuf.write("It seems you don't have permissions to connect to the"
1773
               " master daemon.\nPlease retry as a different user.")
1774
  elif isinstance(err, luxi.ProtocolError):
1775
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1776
               "%s" % msg)
1777
  elif isinstance(err, errors.JobLost):
1778
    obuf.write("Error checking job status: %s" % msg)
1779
  elif isinstance(err, errors.GenericError):
1780
    obuf.write("Unhandled Ganeti error: %s" % msg)
1781
  elif isinstance(err, JobSubmittedException):
1782
    obuf.write("JobID: %s\n" % err.args[0])
1783
    retcode = 0
1784
  else:
1785
    obuf.write("Unhandled exception: %s" % msg)
1786
  return retcode, obuf.getvalue().rstrip('\n')
1787

    
1788

    
1789
def GenericMain(commands, override=None, aliases=None):
1790
  """Generic main function for all the gnt-* commands.
1791

1792
  Arguments:
1793
    - commands: a dictionary with a special structure, see the design doc
1794
                for command line handling.
1795
    - override: if not None, we expect a dictionary with keys that will
1796
                override command line options; this can be used to pass
1797
                options from the scripts to generic functions
1798
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1799

1800
  """
1801
  # save the program name and the entire command line for later logging
1802
  if sys.argv:
1803
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1804
    if len(sys.argv) >= 2:
1805
      binary += " " + sys.argv[1]
1806
      old_cmdline = " ".join(sys.argv[2:])
1807
    else:
1808
      old_cmdline = ""
1809
  else:
1810
    binary = "<unknown program>"
1811
    old_cmdline = ""
1812

    
1813
  if aliases is None:
1814
    aliases = {}
1815

    
1816
  try:
1817
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1818
  except errors.ParameterError, err:
1819
    result, err_msg = FormatError(err)
1820
    ToStderr(err_msg)
1821
    return 1
1822

    
1823
  if func is None: # parse error
1824
    return 1
1825

    
1826
  if override is not None:
1827
    for key, val in override.iteritems():
1828
      setattr(options, key, val)
1829

    
1830
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1831
                     stderr_logging=True, program=binary)
1832

    
1833
  if old_cmdline:
1834
    logging.info("run with arguments '%s'", old_cmdline)
1835
  else:
1836
    logging.info("run with no arguments")
1837

    
1838
  try:
1839
    result = func(options, args)
1840
  except (errors.GenericError, luxi.ProtocolError,
1841
          JobSubmittedException), err:
1842
    result, err_msg = FormatError(err)
1843
    logging.exception("Error during command processing")
1844
    ToStderr(err_msg)
1845

    
1846
  return result
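# Illustrative usage sketch (not part of the original cli.py): a gnt-* script
# normally builds its per-command dictionary (in the structure described in
# the command line handling design doc) and ends with something like
# sys.exit(GenericMain(commands)).  The entry-point name and the alias mapping
# below are made up.
def _ExampleScriptEntryPoint(commands):
  sys.exit(GenericMain(commands, aliases={"del": "remove"}))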
1847

    
1848

    
1849
def ParseNicOption(optvalue):
1850
  """Parses the value of the --net option(s).
1851

1852
  """
1853
  try:
1854
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1855
  except (TypeError, ValueError), err:
1856
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1857

    
1858
  nics = [{}] * nic_max
1859
  for nidx, ndict in optvalue:
1860
    nidx = int(nidx)
1861

    
1862
    if not isinstance(ndict, dict):
1863
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1864
                                 " got %s" % (nidx, ndict))
1865

    
1866
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1867

    
1868
    nics[nidx] = ndict
1869

    
1870
  return nics
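# Illustrative usage sketch (not part of the original cli.py): ParseNicOption
# turns the (index, settings) pairs produced by the --net option into a dense
# list, leaving unspecified indices as empty dicts.  The bridge name is made
# up and "link" is assumed to be a valid NIC parameter name in this version.
def _ExampleParseNics():
  optvalue = [("0", {"link": "br0"}), ("2", {})]
  # Returns [{"link": "br0"}, {}, {}]
  return ParseNicOption(optvalue)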
1871

    
1872

    
1873
def GenericInstanceCreate(mode, opts, args):
1874
  """Add an instance to the cluster via either creation or import.
1875

1876
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1877
  @param opts: the command line options selected by the user
1878
  @type args: list
1879
  @param args: should contain only one element, the new instance name
1880
  @rtype: int
1881
  @return: the desired exit code
1882

1883
  """
1884
  instance = args[0]
1885

    
1886
  (pnode, snode) = SplitNodeOption(opts.node)
1887

    
1888
  hypervisor = None
1889
  hvparams = {}
1890
  if opts.hypervisor:
1891
    hypervisor, hvparams = opts.hypervisor
1892

    
1893
  if opts.nics:
1894
    nics = ParseNicOption(opts.nics)
1895
  elif opts.no_nics:
1896
    # no nics
1897
    nics = []
1898
  elif mode == constants.INSTANCE_CREATE:
1899
    # default of one nic, all auto
1900
    nics = [{}]
1901
  else:
1902
    # mode == import
1903
    nics = []
1904

    
1905
  if opts.disk_template == constants.DT_DISKLESS:
1906
    if opts.disks or opts.sd_size is not None:
1907
      raise errors.OpPrereqError("Diskless instance but disk"
1908
                                 " information passed")
1909
    disks = []
1910
  else:
1911
    if (not opts.disks and not opts.sd_size
1912
        and mode == constants.INSTANCE_CREATE):
1913
      raise errors.OpPrereqError("No disk information specified")
1914
    if opts.disks and opts.sd_size is not None:
1915
      raise errors.OpPrereqError("Please use either the '--disk' or"
1916
                                 " '-s' option")
1917
    if opts.sd_size is not None:
1918
      opts.disks = [(0, {"size": opts.sd_size})]
1919

    
1920
    if opts.disks:
1921
      try:
1922
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1923
      except ValueError, err:
1924
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
1925
      disks = [{}] * disk_max
1926
    else:
1927
      disks = []
1928
    for didx, ddict in opts.disks:
1929
      didx = int(didx)
1930
      if not isinstance(ddict, dict):
1931
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1932
        raise errors.OpPrereqError(msg)
1933
      elif "size" in ddict:
1934
        if "adopt" in ddict:
1935
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
1936
                                     " (disk %d)" % didx)
1937
        try:
1938
          ddict["size"] = utils.ParseUnit(ddict["size"])
1939
        except ValueError, err:
1940
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
1941
                                     (didx, err))
1942
      elif "adopt" in ddict:
1943
        if mode == constants.INSTANCE_IMPORT:
1944
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
1945
                                     " import")
1946
        ddict["size"] = 0
1947
      else:
1948
        raise errors.OpPrereqError("Missing size or adoption source for"
1949
                                   " disk %d" % didx)
1950
      disks[didx] = ddict
1951

    
1952
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
1953
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
1954

    
1955
  if mode == constants.INSTANCE_CREATE:
1956
    start = opts.start
1957
    os_type = opts.os
1958
    force_variant = opts.force_variant
1959
    src_node = None
1960
    src_path = None
1961
    no_install = opts.no_install
1962
    identify_defaults = False
1963
  elif mode == constants.INSTANCE_IMPORT:
1964
    start = False
1965
    os_type = None
1966
    force_variant = False
1967
    src_node = opts.src_node
1968
    src_path = opts.src_dir
1969
    no_install = None
1970
    identify_defaults = opts.identify_defaults
1971
  else:
1972
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
1973

    
1974
  op = opcodes.OpCreateInstance(instance_name=instance,
1975
                                disks=disks,
1976
                                disk_template=opts.disk_template,
1977
                                nics=nics,
1978
                                pnode=pnode, snode=snode,
1979
                                ip_check=opts.ip_check,
1980
                                name_check=opts.name_check,
1981
                                wait_for_sync=opts.wait_for_sync,
1982
                                file_storage_dir=opts.file_storage_dir,
1983
                                file_driver=opts.file_driver,
1984
                                iallocator=opts.iallocator,
1985
                                hypervisor=hypervisor,
1986
                                hvparams=hvparams,
1987
                                beparams=opts.beparams,
1988
                                osparams=opts.osparams,
1989
                                mode=mode,
1990
                                start=start,
1991
                                os_type=os_type,
1992
                                force_variant=force_variant,
1993
                                src_node=src_node,
1994
                                src_path=src_path,
1995
                                no_install=no_install,
1996
                                identify_defaults=identify_defaults)
1997

    
1998
  SubmitOrSend(op, opts)
1999
  return 0
2000

    
2001

    
2002
class _RunWhileClusterStoppedHelper:
2003
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2004

2005
  """
2006
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2007
    """Initializes this class.
2008

2009
    @type feedback_fn: callable
2010
    @param feedback_fn: Feedback function
2011
    @type cluster_name: string
2012
    @param cluster_name: Cluster name
2013
    @type master_node: string
2014
    @param master_node: Master node name
2015
    @type online_nodes: list
2016
    @param online_nodes: List of names of online nodes
2017

2018
    """
2019
    self.feedback_fn = feedback_fn
2020
    self.cluster_name = cluster_name
2021
    self.master_node = master_node
2022
    self.online_nodes = online_nodes
2023

    
2024
    self.ssh = ssh.SshRunner(self.cluster_name)
2025

    
2026
    self.nonmaster_nodes = [name for name in online_nodes
2027
                            if name != master_node]
2028

    
2029
    assert self.master_node not in self.nonmaster_nodes
2030

    
2031
  def _RunCmd(self, node_name, cmd):
2032
    """Runs a command on the local or a remote machine.
2033

2034
    @type node_name: string
2035
    @param node_name: Machine name
2036
    @type cmd: list
2037
    @param cmd: Command
2038

2039
    """
2040
    if node_name is None or node_name == self.master_node:
2041
      # No need to use SSH
2042
      result = utils.RunCmd(cmd)
2043
    else:
2044
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2045

    
2046
    if result.failed:
2047
      errmsg = ["Failed to run command %s" % result.cmd]
2048
      if node_name:
2049
        errmsg.append("on node %s" % node_name)
2050
      errmsg.append(": exitcode %s and error %s" %
2051
                    (result.exit_code, result.output))
2052
      raise errors.OpExecError(" ".join(errmsg))
2053

    
2054
  def Call(self, fn, *args):
2055
    """Call function while all daemons are stopped.
2056

2057
    @type fn: callable
2058
    @param fn: Function to be called
2059

2060
    """
2061
    # Pause watcher by acquiring an exclusive lock on watcher state file
2062
    self.feedback_fn("Blocking watcher")
2063
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2064
    try:
2065
      # TODO: Currently, this just blocks. There's no timeout.
2066
      # TODO: Should it be a shared lock?
2067
      watcher_block.Exclusive(blocking=True)
2068

    
2069
      # Stop master daemons, so that no new jobs can come in and all running
2070
      # ones are finished
2071
      self.feedback_fn("Stopping master daemons")
2072
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2073
      try:
2074
        # Stop daemons on all nodes
2075
        for node_name in self.online_nodes:
2076
          self.feedback_fn("Stopping daemons on %s" % node_name)
2077
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2078

    
2079
        # All daemons are shut down now
2080
        try:
2081
          return fn(self, *args)
2082
        except Exception, err:
2083
          _, errmsg = FormatError(err)
2084
          logging.exception("Caught exception")
2085
          self.feedback_fn(errmsg)
2086
          raise
2087
      finally:
2088
        # Start cluster again, master node last
2089
        for node_name in self.nonmaster_nodes + [self.master_node]:
2090
          self.feedback_fn("Starting daemons on %s" % node_name)
2091
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2092
    finally:
2093
      # Resume watcher
2094
      watcher_block.Close()
2095

    
2096

    
2097
def RunWhileClusterStopped(feedback_fn, fn, *args):
2098
  """Calls a function while all cluster daemons are stopped.
2099

2100
  @type feedback_fn: callable
2101
  @param feedback_fn: Feedback function
2102
  @type fn: callable
2103
  @param fn: Function to be called when daemons are stopped
2104

2105
  """
2106
  feedback_fn("Gathering cluster information")
2107

    
2108
  # This ensures we're running on the master daemon
2109
  cl = GetClient()
2110

    
2111
  (cluster_name, master_node) = \
2112
    cl.QueryConfigValues(["cluster_name", "master_node"])
2113

    
2114
  online_nodes = GetOnlineNodes([], cl=cl)
2115

    
2116
  # Don't keep a reference to the client. The master daemon will go away.
2117
  del cl
2118

    
2119
  assert master_node in online_nodes
2120

    
2121
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2122
                                       online_nodes).Call(fn, *args)
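# Illustrative usage sketch (not part of the original cli.py): the callback
# given to RunWhileClusterStopped() receives the helper object as its first
# argument and runs while the daemons on every node are down.  The helper and
# callback names and the message are made up.
def _ExampleWhileStopped():
  def _Maintenance(ctx):
    # All Ganeti daemons are stopped at this point; the helper's feedback
    # function can still be used for progress messages.
    ctx.feedback_fn("Performing offline maintenance")
  return RunWhileClusterStopped(ToStdout, _Maintenance)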
2123

    
2124

    
2125
def GenerateTable(headers, fields, separator, data,
2126
                  numfields=None, unitfields=None,
2127
                  units=None):
2128
  """Prints a table with headers and different fields.
2129

2130
  @type headers: dict
2131
  @param headers: dictionary mapping field names to headers for
2132
      the table
2133
  @type fields: list
2134
  @param fields: the field names corresponding to each row in
2135
      the data field
2136
  @param separator: the separator to be used; if this is None,
2137
      the default 'smart' algorithm is used which computes optimal
2138
      field width, otherwise just the separator is used between
2139
      each field
2140
  @type data: list
2141
  @param data: a list of lists, each sublist being one row to be output
2142
  @type numfields: list
2143
  @param numfields: a list with the fields that hold numeric
2144
      values and thus should be right-aligned
2145
  @type unitfields: list
2146
  @param unitfields: a list with the fields that hold numeric
2147
      values that should be formatted with the units field
2148
  @type units: string or None
2149
  @param units: the units we should use for formatting, or None for
2150
      automatic choice (human-readable for non-separator usage, otherwise
2151
      megabytes); this is a one-letter string
2152

2153
  """
2154
  if units is None:
2155
    if separator:
2156
      units = "m"
2157
    else:
2158
      units = "h"
2159

    
2160
  if numfields is None:
2161
    numfields = []
2162
  if unitfields is None:
2163
    unitfields = []
2164

    
2165
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2166
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2167

    
2168
  format_fields = []
2169
  for field in fields:
2170
    if headers and field not in headers:
2171
      # TODO: handle better unknown fields (either revert to old
2172
      # style of raising exception, or deal more intelligently with
2173
      # variable fields)
2174
      headers[field] = field
2175
    if separator is not None:
2176
      format_fields.append("%s")
2177
    elif numfields.Matches(field):
2178
      format_fields.append("%*s")
2179
    else:
2180
      format_fields.append("%-*s")
2181

    
2182
  if separator is None:
2183
    mlens = [0 for name in fields]
2184
    format_str = ' '.join(format_fields)
2185
  else:
2186
    format_str = separator.replace("%", "%%").join(format_fields)
2187

    
2188
  for row in data:
2189
    if row is None:
2190
      continue
2191
    for idx, val in enumerate(row):
2192
      if unitfields.Matches(fields[idx]):
2193
        try:
2194
          val = int(val)
2195
        except (TypeError, ValueError):
2196
          pass
2197
        else:
2198
          val = row[idx] = utils.FormatUnit(val, units)
2199
      val = row[idx] = str(val)
2200
      if separator is None:
2201
        mlens[idx] = max(mlens[idx], len(val))
2202

    
2203
  result = []
2204
  if headers:
2205
    args = []
2206
    for idx, name in enumerate(fields):
2207
      hdr = headers[name]
2208
      if separator is None:
2209
        mlens[idx] = max(mlens[idx], len(hdr))
2210
        args.append(mlens[idx])
2211
      args.append(hdr)
2212
    result.append(format_str % tuple(args))
2213

    
2214
  if separator is None:
2215
    assert len(mlens) == len(fields)
2216

    
2217
    if fields and not numfields.Matches(fields[-1]):
2218
      mlens[-1] = 0
2219

    
2220
  for line in data:
2221
    args = []
2222
    if line is None:
2223
      line = ['-' for _ in fields]
2224
    for idx in range(len(fields)):
2225
      if separator is None:
2226
        args.append(mlens[idx])
2227
      args.append(line[idx])
2228
    result.append(format_str % tuple(args))
2229

    
2230
  return result
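# Illustrative usage sketch (not part of the original cli.py): rendering a
# small two-column table with the "smart" layout (no separator), where the
# numeric column is right-aligned and shown in human-readable units.  The
# field names and data are made up.
def _ExampleTable():
  headers = {"name": "Node", "dfree": "DFree"}
  fields = ["name", "dfree"]
  data = [["node1.example.com", 1024],
          ["node2.example.com", 20480]]
  for line in GenerateTable(headers, fields, None, data,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout("%s", line)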
2231

    
2232

    
2233
def FormatTimestamp(ts):
2234
  """Formats a given timestamp.
2235

2236
  @type ts: timestamp
2237
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2238

2239
  @rtype: string
2240
  @return: a string with the formatted timestamp
2241

2242
  """
2243
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2244
    return '?'
2245
  sec, usec = ts
2246
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2247

    
2248

    
2249
def ParseTimespec(value):
2250
  """Parse a time specification.
2251

2252
  The following suffixes will be recognized:
2253

2254
    - s: seconds
2255
    - m: minutes
2256
    - h: hours
2257
    - d: day
2258
    - w: weeks
2259

2260
  Without any suffix, the value will be taken to be in seconds.
2261

2262
  """
2263
  value = str(value)
2264
  if not value:
2265
    raise errors.OpPrereqError("Empty time specification passed")
2266
  suffix_map = {
2267
    's': 1,
2268
    'm': 60,
2269
    'h': 3600,
2270
    'd': 86400,
2271
    'w': 604800,
2272
    }
2273
  if value[-1] not in suffix_map:
2274
    try:
2275
      value = int(value)
2276
    except (TypeError, ValueError):
2277
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2278
  else:
2279
    multiplier = suffix_map[value[-1]]
2280
    value = value[:-1]
2281
    if not value: # no data left after stripping the suffix
2282
      raise errors.OpPrereqError("Invalid time specification (only"
2283
                                 " suffix passed)")
2284
    try:
2285
      value = int(value) * multiplier
2286
    except (TypeError, ValueError):
2287
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2288
  return value
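# Illustrative sketch (not part of the original cli.py): a few accepted time
# specifications and the number of seconds they parse to.
def _ExampleTimespecs():
  assert ParseTimespec("30") == 30
  assert ParseTimespec("45m") == 45 * 60
  assert ParseTimespec("2d") == 2 * 86400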
2289

    
2290

    
2291
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2292
                   filter_master=False):
2293
  """Returns the names of online nodes.
2294

2295
  This function will also log a warning on stderr with the names of
2296
  the offline nodes that are skipped.
2297

2298
  @param nodes: if not empty, use only this subset of nodes (minus the
2299
      offline ones)
2300
  @param cl: if not None, luxi client to use
2301
  @type nowarn: boolean
2302
  @param nowarn: by default, this function will output a note with the
2303
      offline nodes that are skipped; if this parameter is True the
2304
      note is not displayed
2305
  @type secondary_ips: boolean
2306
  @param secondary_ips: if True, return the secondary IPs instead of the
2307
      names, useful for doing network traffic over the replication interface
2308
      (if any)
2309
  @type filter_master: boolean
2310
  @param filter_master: if True, do not return the master node in the list
2311
      (useful in coordination with secondary_ips where we cannot check our
2312
      node name against the list)
2313

2314
  """
2315
  if cl is None:
2316
    cl = GetClient()
2317

    
2318
  if secondary_ips:
2319
    name_idx = 2
2320
  else:
2321
    name_idx = 0
2322

    
2323
  if filter_master:
2324
    master_node = cl.QueryConfigValues(["master_node"])[0]
2325
    filter_fn = lambda x: x != master_node
2326
  else:
2327
    filter_fn = lambda _: True
2328

    
2329
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2330
                         use_locking=False)
2331
  offline = [row[0] for row in result if row[1]]
2332
  if offline and not nowarn:
2333
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2334
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
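# Illustrative usage sketch (not part of the original cli.py): getting the
# secondary IPs of all online nodes except the master, e.g. for transfers
# over the replication network.  The helper name is made up.
def _ExampleReplicationIPs():
  return GetOnlineNodes([], secondary_ips=True, filter_master=True)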
2335

    
2336

    
2337
def _ToStream(stream, txt, *args):
2338
  """Write a message to a stream, bypassing the logging system
2339

2340
  @type stream: file object
2341
  @param stream: the file to which we should write
2342
  @type txt: str
2343
  @param txt: the message
2344

2345
  """
2346
  if args:
2347
    args = tuple(args)
2348
    stream.write(txt % args)
2349
  else:
2350
    stream.write(txt)
2351
  stream.write('\n')
2352
  stream.flush()
2353

    
2354

    
2355
def ToStdout(txt, *args):
2356
  """Write a message to stdout only, bypassing the logging system
2357

2358
  This is just a wrapper over _ToStream.
2359

2360
  @type txt: str
2361
  @param txt: the message
2362

2363
  """
2364
  _ToStream(sys.stdout, txt, *args)
2365

    
2366

    
2367
def ToStderr(txt, *args):
2368
  """Write a message to stderr only, bypassing the logging system
2369

2370
  This is just a wrapper over _ToStream.
2371

2372
  @type txt: str
2373
  @param txt: the message
2374

2375
  """
2376
  _ToStream(sys.stderr, txt, *args)
2377

    
2378

    
2379
class JobExecutor(object):
2380
  """Class which manages the submission and execution of multiple jobs.
2381

2382
  Note that instances of this class should not be reused between
2383
  GetResults() calls.
2384

2385
  """
2386
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2387
    self.queue = []
2388
    if cl is None:
2389
      cl = GetClient()
2390
    self.cl = cl
2391
    self.verbose = verbose
2392
    self.jobs = []
2393
    self.opts = opts
2394
    self.feedback_fn = feedback_fn
2395

    
2396
  def QueueJob(self, name, *ops):
2397
    """Record a job for later submit.
2398

2399
    @type name: string
2400
    @param name: a description of the job, will be used in WaitJobSet
2401
    """
2402
    SetGenericOpcodeOpts(ops, self.opts)
2403
    self.queue.append((name, ops))
2404

    
2405
  def SubmitPending(self, each=False):
2406
    """Submit all pending jobs.
2407

2408
    """
2409
    if each:
2410
      results = []
2411
      for row in self.queue:
2412
        # SubmitJob will remove the success status, but raise an exception if
2413
        # the submission fails, so we'll notice that anyway.
2414
        results.append([True, self.cl.SubmitJob(row[1])])
2415
    else:
2416
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2417
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2418
                                                            self.queue)):
2419
      self.jobs.append((idx, status, data, name))
2420

    
2421
  def _ChooseJob(self):
2422
    """Choose a non-waiting/queued job to poll next.
2423

2424
    """
2425
    assert self.jobs, "_ChooseJob called with empty job list"
2426

    
2427
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2428
    assert result
2429

    
2430
    for job_data, status in zip(self.jobs, result):
2431
      if (isinstance(status, list) and status and
2432
          status[0] in (constants.JOB_STATUS_QUEUED,
2433
                        constants.JOB_STATUS_WAITLOCK,
2434
                        constants.JOB_STATUS_CANCELING)):
2435
        # job is still present and waiting
2436
        continue
2437
      # good candidate found (either running job or lost job)
2438
      self.jobs.remove(job_data)
2439
      return job_data
2440

    
2441
    # no job found
2442
    return self.jobs.pop(0)
2443

    
2444
  def GetResults(self):
2445
    """Wait for and return the results of all jobs.
2446

2447
    @rtype: list
2448
    @return: list of tuples (success, job results), in the same order
2449
        as the submitted jobs; if a job has failed, instead of the result
2450
        there will be the error message
2451

2452
    """
2453
    if not self.jobs:
2454
      self.SubmitPending()
2455
    results = []
2456
    if self.verbose:
2457
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2458
      if ok_jobs:
2459
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2460

    
2461
    # first, remove any non-submitted jobs
2462
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2463
    for idx, _, jid, name in failures:
2464
      ToStderr("Failed to submit job for %s: %s", name, jid)
2465
      results.append((idx, False, jid))
2466

    
2467
    while self.jobs:
2468
      (idx, _, jid, name) = self._ChooseJob()
2469
      ToStdout("Waiting for job %s for %s...", jid, name)
2470
      try:
2471
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2472
        success = True
2473
      except errors.JobLost, err:
2474
        _, job_result = FormatError(err)
2475
        ToStderr("Job %s for %s has been archived, cannot check its result",
2476
                 jid, name)
2477
        success = False
2478
      except (errors.GenericError, luxi.ProtocolError), err:
2479
        _, job_result = FormatError(err)
2480
        success = False
2481
        # the error message will always be shown, verbose or not
2482
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2483

    
2484
      results.append((idx, success, job_result))
2485

    
2486
    # sort based on the index, then drop it
2487
    results.sort()
2488
    results = [i[1:] for i in results]
2489

    
2490
    return results
2491

    
2492
  def WaitOrShow(self, wait):
2493
    """Wait for job results or only print the job IDs.
2494

2495
    @type wait: boolean
2496
    @param wait: whether to wait or not
2497

2498
    """
2499
    if wait:
2500
      return self.GetResults()
2501
    else:
2502
      if not self.jobs:
2503
        self.SubmitPending()
2504
      for _, status, result, name in self.jobs:
2505
        if status:
2506
          ToStdout("%s: %s", result, name)
2507
        else:
2508
          ToStderr("Failure for %s: %s", name, result)
2509
      return [row[1:3] for row in self.jobs]
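# Illustrative usage sketch (not part of the original cli.py): queueing
# several jobs (one list of opcodes per job) and waiting for all of them;
# GetResults() returns one (success, result-or-error) pair per job, in
# submission order.  The helper name and argument shapes are made up.
def _ExampleRunJobs(names_and_ops, opts):
  jex = JobExecutor(opts=opts)
  for name, ops in names_and_ops:
    jex.QueueJob(name, *ops)
  return jex.GetResults()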