#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)

__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]

def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)

def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)

def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict

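# Illustrative sketch of what _SplitKeyVal produces (the option name and the
# keys below are hypothetical, chosen only to show the NO_PREFIX/UN_PREFIX
# handling described above):
#
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-kernel_path")
#     -> {"memory": "512", "auto_balance": False, "kernel_path": None}
#
#   _SplitKeyVal("-B", "memory=512,memory=1024")
#     -> raises errors.ParameterError (duplicate key "memory")
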
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)

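# Illustrative sketch of the ident:key=val parsing done by check_ident_key_val
# (values are hypothetical; "--net" is simply an option that uses this type):
#
#   check_ident_key_val(None, "--net", "0:mode=bridged,link=br0")
#     -> ("0", {"mode": "bridged", "link": "br0"})
#
#   check_ident_key_val(None, "--net", "no_0")
#     -> ("0", False), i.e. remove/disable that parameter group
#
# check_key_val behaves the same but without the leading identifier, and
# check_bool maps "yes"-style values to True and "no"-style values to False.
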
# completion_suggest is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption

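# Minimal sketch of how the custom types registered in CliOption are meant to
# be used when declaring options (the option below is hypothetical, not one
# defined in this module):
#
#   EXAMPLE_PARAMS_OPT = cli_option("--example-parameters", dest="exparams",
#                                   type="keyval", default={},
#                                   help="Example key=val,... parameters")
#
# The "keyval", "identkeyval", "unit" and "bool" types are converted by the
# TYPE_CHECKER callbacks above before the value reaches the command function.
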
_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the prefix for the instance MAC"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

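# Sketch of the intended reuse of these lists: a command-specific option list
# is normally built by extending COMMON_CREATE_OPTS, for example (the exact
# extra options differ per command, this pair is only an illustration):
#
#   add_opts = COMMON_CREATE_OPTS + [NO_INSTALL_OPT, OS_OPT]
#
# COMMON_OPTS itself is appended automatically by _ParseArgs below.
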
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args

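# Illustrative shape of the "commands" dictionary unpacked above; each value
# is a 5-tuple of (function, argument definition, option list, usage string,
# description). The command name below is hypothetical:
#
#   commands = {
#     "example": (ExampleFunc, ARGS_ONE_INSTANCE, [FORCE_OPT],
#                 "[-f] <instance>", "Short description of the command"),
#     }
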
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True

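# Illustrative outcomes of the checks above (command name and definitions are
# hypothetical):
#
#   _CheckArguments("example", [ArgInstance(min=1, max=1)], ["inst1"])
#     -> True (exactly one value for a min=1/max=1 argument)
#
#   _CheckArguments("example", [ArgInstance(min=1, max=1)], [])
#     -> prints an error via ToStderr and returns False
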
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")

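# Illustrative results of the three helpers above (all inputs hypothetical):
#
#   SplitNodeOption("node1:node2")  -> ["node1", "node2"]
#   SplitNodeOption("node1")        -> ("node1", None)
#   CalculateOSNames("debootstrap", ["default", "min"])
#     -> ["debootstrap+default", "debootstrap+min"]
#   ParseFields("+disk_usage", ["name", "os"]) -> ["name", "os", "disk_usage"]
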
UsesRPC = rpc.RunWithRPC

def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer

class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id

def GenericPollJob(job_id, cbs, report_cbs):
1433
  """Generic job-polling function.
1434

1435
  @type job_id: number
1436
  @param job_id: Job ID
1437
  @type cbs: Instance of L{JobPollCbBase}
1438
  @param cbs: Data callbacks
1439
  @type report_cbs: Instance of L{JobPollReportCbBase}
1440
  @param report_cbs: Reporting callbacks
1441

1442
  """
1443
  prev_job_info = None
1444
  prev_logmsg_serial = None
1445

    
1446
  status = None
1447

    
1448
  while True:
1449
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1450
                                      prev_logmsg_serial)
1451
    if not result:
1452
      # job not found, go away!
1453
      raise errors.JobLost("Job with id %s lost" % job_id)
1454

    
1455
    if result == constants.JOB_NOTCHANGED:
1456
      report_cbs.ReportNotChanged(job_id, status)
1457

    
1458
      # Wait again
1459
      continue
1460

    
1461
    # Split result, a tuple of (field values, log entries)
1462
    (job_info, log_entries) = result
1463
    (status, ) = job_info
1464

    
1465
    if log_entries:
1466
      for log_entry in log_entries:
1467
        (serial, timestamp, log_type, message) = log_entry
1468
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1469
                                    log_type, message)
1470
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1471

    
1472
    # TODO: Handle canceled and archived jobs
1473
    elif status in (constants.JOB_STATUS_SUCCESS,
1474
                    constants.JOB_STATUS_ERROR,
1475
                    constants.JOB_STATUS_CANCELING,
1476
                    constants.JOB_STATUS_CANCELED):
1477
      break
1478

    
1479
    prev_job_info = job_info
1480

    
1481
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1482
  if not jobs:
1483
    raise errors.JobLost("Job with id %s lost" % job_id)
1484

    
1485
  status, opstatus, result = jobs[0]
1486

    
1487
  if status == constants.JOB_STATUS_SUCCESS:
1488
    return result
1489

    
1490
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1491
    raise errors.OpExecError("Job was canceled")
1492

    
1493
  has_ok = False
1494
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1495
    if status == constants.OP_STATUS_SUCCESS:
1496
      has_ok = True
1497
    elif status == constants.OP_STATUS_ERROR:
1498
      errors.MaybeRaise(msg)
1499

    
1500
      if has_ok:
1501
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1502
                                 (idx, msg))
1503

    
1504
      raise errors.OpExecError(str(msg))
1505

    
1506
  # default failure mode
1507
  raise errors.OpExecError(result)
1508

    
1509

    
1510
class JobPollCbBase:
1511
  """Base class for L{GenericPollJob} callbacks.
1512

1513
  """
1514
  def __init__(self):
1515
    """Initializes this class.
1516

1517
    """
1518

    
1519
  def WaitForJobChangeOnce(self, job_id, fields,
1520
                           prev_job_info, prev_log_serial):
1521
    """Waits for changes on a job.
1522

1523
    """
1524
    raise NotImplementedError()
1525

    
1526
  def QueryJobs(self, job_ids, fields):
1527
    """Returns the selected fields for the selected job IDs.
1528

1529
    @type job_ids: list of numbers
1530
    @param job_ids: Job IDs
1531
    @type fields: list of strings
1532
    @param fields: Fields
1533

1534
    """
1535
    raise NotImplementedError()
1536

    
1537

    
1538
class JobPollReportCbBase:
1539
  """Base class for L{GenericPollJob} reporting callbacks.
1540

1541
  """
1542
  def __init__(self):
1543
    """Initializes this class.
1544

1545
    """
1546

    
1547
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1548
    """Handles a log message.
1549

1550
    """
1551
    raise NotImplementedError()
1552

    
1553
  def ReportNotChanged(self, job_id, status):
1554
    """Called for if a job hasn't changed in a while.
1555

1556
    @type job_id: number
1557
    @param job_id: Job ID
1558
    @type status: string or None
1559
    @param status: Job status if available
1560

1561
    """
1562
    raise NotImplementedError()
1563

    
1564

    
1565
class _LuxiJobPollCb(JobPollCbBase):
1566
  def __init__(self, cl):
1567
    """Initializes this class.
1568

1569
    """
1570
    JobPollCbBase.__init__(self)
1571
    self.cl = cl
1572

    
1573
  def WaitForJobChangeOnce(self, job_id, fields,
1574
                           prev_job_info, prev_log_serial):
1575
    """Waits for changes on a job.
1576

1577
    """
1578
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1579
                                        prev_job_info, prev_log_serial)
1580

    
1581
  def QueryJobs(self, job_ids, fields):
1582
    """Returns the selected fields for the selected job IDs.
1583

1584
    """
1585
    return self.cl.QueryJobs(job_ids, fields)
1586

    
1587

    
1588
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1589
  def __init__(self, feedback_fn):
1590
    """Initializes this class.
1591

1592
    """
1593
    JobPollReportCbBase.__init__(self)
1594

    
1595
    self.feedback_fn = feedback_fn
1596

    
1597
    assert callable(feedback_fn)
1598

    
1599
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1600
    """Handles a log message.
1601

1602
    """
1603
    self.feedback_fn((timestamp, log_type, log_msg))
1604

    
1605
  def ReportNotChanged(self, job_id, status):
1606
    """Called if a job hasn't changed in a while.
1607

1608
    """
1609
    # Ignore
1610

    
1611

    
1612
class StdioJobPollReportCb(JobPollReportCbBase):
1613
  def __init__(self):
1614
    """Initializes this class.
1615

1616
    """
1617
    JobPollReportCbBase.__init__(self)
1618

    
1619
    self.notified_queued = False
1620
    self.notified_waitlock = False
1621

    
1622
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1623
    """Handles a log message.
1624

1625
    """
1626
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1627
             FormatLogMessage(log_type, log_msg))
1628

    
1629
  def ReportNotChanged(self, job_id, status):
1630
    """Called if a job hasn't changed in a while.
1631

1632
    """
1633
    if status is None:
1634
      return
1635

    
1636
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1637
      ToStderr("Job %s is waiting in queue", job_id)
1638
      self.notified_queued = True
1639

    
1640
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1641
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1642
      self.notified_waitlock = True
1643

    
1644

    
1645
def FormatLogMessage(log_type, log_msg):
1646
  """Formats a job message according to its type.
1647

1648
  """
1649
  if log_type != constants.ELOG_MESSAGE:
1650
    log_msg = str(log_msg)
1651

    
1652
  return utils.SafeEncode(log_msg)
1653

    
1654

    
1655
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1656
  """Function to poll for the result of a job.
1657

1658
  @type job_id: job identified
1659
  @param job_id: the job to poll for results
1660
  @type cl: luxi.Client
1661
  @param cl: the luxi client to use for communicating with the master;
1662
             if None, a new client will be created
1663

1664
  """
1665
  if cl is None:
1666
    cl = GetClient()
1667

    
1668
  if reporter is None:
1669
    if feedback_fn:
1670
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1671
    else:
1672
      reporter = StdioJobPollReportCb()
1673
  elif feedback_fn:
1674
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1675

    
1676
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
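
# Usage sketch (assumes 'op' is an opcode built by the caller and 'cl' an
# existing luxi.Client; neither is defined here):
#
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)  # logs go through StdioJobPollReportCb
#   first_result = results[0]
#
# Passing feedback_fn instead routes log messages through
# FeedbackFnJobPollReportCb; passing both raises ProgrammerError.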


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
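
# Sketch of the calling convention (the command function below is
# hypothetical): with --submit, the JobSubmittedException propagates up to
# GenericMain, which prints the job ID via FormatError and exits with code 0;
# otherwise the opcode result is returned to the caller:
#
#   def _DoSomething(opts, args):
#     op = ...  # build the opcode from opts/args (elided)
#     SubmitOrSend(op, opts)
#     return 0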


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
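
# Illustrative example: callers unpack the tuple, print the message and use
# the first element as the process exit code:
#
#   retcode, err_msg = FormatError(errors.OpPrereqError("instance not found"))
#   # retcode == 1; err_msg starts with "Failure: prerequisites not met..."
#   ToStderr(err_msg)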


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
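
# Illustrative example of the expected input shape (parameter values are made
# up): the option parser yields (index, dict) pairs; indices that were not
# given stay as empty dicts so cluster defaults apply:
#
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   # -> [{"link": "br0"}, {}, {"mode": "routed"}]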


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
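
# Usage sketch (the callback is hypothetical): the called function receives
# the helper instance as its first argument, so it can keep reporting progress
# and run further commands through _RunCmd while every daemon is down:
#
#   def _DoOfflineWork(helper, new_name):
#     helper.feedback_fn("Working on %s" % helper.master_node)
#     ...
#
#   RunWhileClusterStopped(ToStdout, _DoOfflineWork, "cluster.example.com")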


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
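
# Illustrative example (headers, fields and data are made up): with
# separator=None the 'smart' layout pads each column to its widest value and
# right-aligns the fields listed in numfields; the return value is a list of
# formatted lines, one for the header row and one per data row:
#
#   GenerateTable(headers={"name": "Node", "pinst_cnt": "Pinst"},
#                 fields=["name", "pinst_cnt"],
#                 separator=None,
#                 data=[["node1.example.com", 2], ["node2.example.com", 11]],
#                 numfields=["pinst_cnt"])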


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
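
# Illustrative examples:
#
#   >>> ParseTimespec("30")
#   30
#   >>> ParseTimespec("2h")
#   7200
#   >>> ParseTimespec("1w")
#   604800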


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
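
# Usage sketch (hypothetical names: 'opts' are the parsed command-line options
# and op1/op2 are opcodes built by the caller): queue one job per instance,
# submit them in a single SubmitManyJobs call and wait for all results:
#
#   jex = JobExecutor(opts=opts)
#   for name, op in [("instance1", op1), ("instance2", op2)]:
#     jex.QueueJob(name, op)
#   for success, result in jex.GetResults():
#     if not success:
#       ToStderr("A job failed: %s", result)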