root / lib / cli.py @ b883637f

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)

__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have
  the prefix '-' will have value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
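  # Illustrative example (added for clarity, not part of the original source;
  # the key names below are arbitrary): the value
  #   "mem=512,no_auto_balance,-vnc_bind_address"
  # would be parsed into
  #   {"mem": "512", "auto_balance": False, "vnc_bind_address": None}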


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
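  # Illustrative examples (added for clarity, not part of the original
  # source; the identifiers and keys are made up):
  #   "0:size=10G,mode=ro"  => ("0", {"size": "10G", "mode": "ro"})
  #   "no_link0"            => ("link0", False)
  #   "-link0"              => ("link0", None)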


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (as during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
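  # Illustrative example (added for clarity, not part of the original source):
  # with args_def=[ArgInstance(min=1, max=1)] exactly one argument is
  # accepted, while args_def=[ArgNode(min=1)] accepts one or more arguments.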


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
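  # Illustrative example (added for clarity; node names are made up):
  # "node1:node2" is split into ("node1", "node2"), while "node1" yields
  # ("node1", None).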


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
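  # Illustrative example (added for clarity; names are made up):
  #   CalculateOSNames("debootstrap", ["default", "minimal"])
  #   => ["debootstrap+default", "debootstrap+minimal"]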


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
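  # Illustrative examples (added for clarity; field names are made up):
  #   ParseFields(None, ["name", "status"])       => ["name", "status"]
  #   ParseFields("+tags", ["name", "status"])    => ["name", "status", "tags"]
  #   ParseFields("name,os", ["name", "status"])  => ["name", "os"]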


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
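  # Illustrative example (added for clarity, not part of the original source):
  # with the default choices, AskUser("Continue?") prompts "y/[n]/?" on the
  # controlling terminal and returns True only for 'y'; without a tty it
  # returns the default answer (the last choice, i.e. False).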
1350

    
1351

    
1352
class JobSubmittedException(Exception):
1353
  """Job was submitted, client should exit.
1354

1355
  This exception has one argument, the ID of the job that was
1356
  submitted. The handler should print this ID.
1357

1358
  This is not an error, just a structured way to exit from clients.
1359

1360
  """
1361

    
1362

    
1363
def SendJob(ops, cl=None):
1364
  """Function to submit an opcode without waiting for the results.
1365

1366
  @type ops: list
1367
  @param ops: list of opcodes
1368
  @type cl: luxi.Client
1369
  @param cl: the luxi client to use for communicating with the master;
1370
             if None, a new client will be created
1371

1372
  """
1373
  if cl is None:
1374
    cl = GetClient()
1375

    
1376
  job_id = cl.SubmitJob(ops)
1377

    
1378
  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
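

# Example usage (minimal sketch): polling a job with the standard luxi data
# callbacks and stdio reporting, i.e. the same wiring that PollJob below
# performs on behalf of its callers.
def _GenericPollJobExample(job_id, cl=None):
  """Illustrative helper that polls a job until it finishes."""
  if cl is None:
    cl = GetClient()
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), StdioJobPollReportCb())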


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
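

# Example usage (minimal sketch): collecting the job's log messages into a
# list instead of printing them, by passing a feedback function; each entry
# is a (timestamp, log_type, log_msg) tuple as delivered by
# FeedbackFnJobPollReportCb.
def _PollJobCollectingLogs(job_id, cl=None):
  """Illustrative helper returning (job result, collected log messages)."""
  messages = []
  result = PollJob(job_id, cl=cl, feedback_fn=messages.append)
  return (result, messages)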


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
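

# Example usage (minimal sketch): a command implementation that honours the
# "--submit" option via SubmitOrSend; the JobSubmittedException raised in the
# submit-only case is turned into a printed job ID by FormatError/GenericMain
# further down.
def _SubmitOrSendExample(op, opts):
  """Illustrative command body returning a standard exit code."""
  result = SubmitOrSend(op, opts)
  ToStdout("Operation result: %s", result)
  return 0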


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
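

# Example usage (minimal sketch): converting an exception into an exit code
# and a user-readable message instead of letting it propagate, much as
# GenericMain does below.
def _FormatErrorExample(op):
  """Illustrative wrapper that reports errors instead of raising them."""
  try:
    return (0, SubmitOpCode(op))
  except (errors.GenericError, luxi.ProtocolError, JobSubmittedException), err:
    retcode, msg = FormatError(err)
    ToStderr(msg)
    return (retcode, None)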


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
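

# Example usage (minimal sketch, kept as a comment since it belongs in a
# gnt-* script rather than in this library): a tiny entry point built on
# GenericMain. The tuple layout assumed here, (function, arguments, options,
# usage suffix, description), follows the gnt-* scripts; see _ParseArgs and
# the command line design documentation for the authoritative structure.
#
#   def ShowVersion(opts, args):
#     ToStdout("Software version: %s", constants.RELEASE_VERSION)
#     return 0
#
#   commands = {
#     "version": (ShowVersion, ARGS_NONE, [DEBUG_OPT],
#                 "", "Show version information"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))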
1854

    
1855

    
1856
def ParseNicOption(optvalue):
1857
  """Parses the value of the --net option(s).
1858

1859
  """
1860
  try:
1861
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1862
  except (TypeError, ValueError), err:
1863
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1864

    
1865
  nics = [{}] * nic_max
1866
  for nidx, ndict in optvalue:
1867
    nidx = int(nidx)
1868

    
1869
    if not isinstance(ndict, dict):
1870
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1871
                                 " got %s" % (nidx, ndict))
1872

    
1873
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1874

    
1875
    nics[nidx] = ndict
1876

    
1877
  return nics
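

# Example usage (minimal sketch): the --net option parser turns a list of
# (index, settings) pairs into a dense list of NIC definitions; the specific
# NIC parameters shown here are illustrative.
def _ParseNicOptionExample():
  """Illustrative call: indices 0 and 2 are set, index 1 stays empty."""
  optvalue = [("0", {"ip": "192.0.2.10"}), ("2", {"mode": "bridged"})]
  return ParseNicOption(optvalue)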


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
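

# Example usage (minimal sketch): running a callable while every node's
# daemons are down, e.g. for an offline configuration change; note that the
# callable receives the helper instance as its first argument.
def _RunWhileClusterStoppedExample():
  """Illustrative invocation using ToStdout for feedback."""
  def _OfflineWork(helper):
    # 'helper' is the _RunWhileClusterStoppedHelper instance; real callers
    # would perform their offline work here instead of just reporting.
    helper.feedback_fn("Cluster is stopped, doing offline work")
  return RunWhileClusterStopped(ToStdout, _OfflineWork)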


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
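

# Example usage (minimal sketch): building a small two-column listing; with
# separator=None the column widths are computed automatically, and the "size"
# field is both right-aligned and unit-formatted.
def _GenerateTableExample():
  """Illustrative name/size listing printed line by line."""
  headers = {"name": "Node", "size": "Size"}
  fields = ["name", "size"]
  data = [["node1.example.com", 10240], ["node2.example.com", 2048]]
  lines = GenerateTable(headers, fields, None, data,
                        numfields=["size"], unitfields=["size"])
  for line in lines:
    ToStdout(line)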


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
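

# Example usage (minimal sketch): every call returns seconds, so "1w" and
# "604800" are equivalent; this is how age-based options such as job
# archival cut-offs are typically parsed.
def _ParseTimespecExample():
  """Illustrative conversions."""
  assert ParseTimespec("30") == 30
  assert ParseTimespec("90s") == 90
  assert ParseTimespec("2h") == 7200
  assert ParseTimespec("1w") == 604800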
2296

    
2297

    
2298
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2299
                   filter_master=False):
2300
  """Returns the names of online nodes.
2301

2302
  This function will also log a warning on stderr with the names of
2303
  the online nodes.
2304

2305
  @param nodes: if not empty, use only this subset of nodes (minus the
2306
      offline ones)
2307
  @param cl: if not None, luxi client to use
2308
  @type nowarn: boolean
2309
  @param nowarn: by default, this function will output a note with the
2310
      offline nodes that are skipped; if this parameter is True the
2311
      note is not displayed
2312
  @type secondary_ips: boolean
2313
  @param secondary_ips: if True, return the secondary IPs instead of the
2314
      names, useful for doing network traffic over the replication interface
2315
      (if any)
2316
  @type filter_master: boolean
2317
  @param filter_master: if True, do not return the master node in the list
2318
      (useful in coordination with secondary_ips where we cannot check our
2319
      node name against the list)
2320

2321
  """
2322
  if cl is None:
2323
    cl = GetClient()
2324

    
2325
  if secondary_ips:
2326
    name_idx = 2
2327
  else:
2328
    name_idx = 0
2329

    
2330
  if filter_master:
2331
    master_node = cl.QueryConfigValues(["master_node"])[0]
2332
    filter_fn = lambda x: x != master_node
2333
  else:
2334
    filter_fn = lambda _: True
2335

    
2336
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2337
                         use_locking=False)
2338
  offline = [row[0] for row in result if row[1]]
2339
  if offline and not nowarn:
2340
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2341
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
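

# Example usage (minimal sketch): getting the secondary IPs of all online
# nodes except the master, e.g. when pushing data over the replication
# network as described in the docstring above.
def _GetOnlineNodesExample(cl=None):
  """Illustrative call returning secondary IPs of non-master online nodes."""
  return GetOnlineNodes([], cl=cl, nowarn=True,
                        secondary_ips=True, filter_master=True)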


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
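

# Example usage (minimal sketch): submitting several jobs at once and waiting
# for all of them; opcode construction is elided, only the JobExecutor flow
# is shown.
def _JobExecutorExample(jobs, opts=None):
  """Illustrative batch submission.

  @param jobs: sequence of (name, [opcodes...]) pairs

  """
  jex = JobExecutor(opts=opts)
  for name, ops in jobs:
    jex.QueueJob(name, *ops)
  results = jex.GetResults()
  bad = [row for row in results if not row[0]]
  if bad:
    return constants.EXIT_FAILURE
  return constants.EXIT_SUCCESS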