#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
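
# Editor's note: an illustrative sketch of _SplitKeyVal's behaviour, not part
# of the original module. Plain keys become True, keys with the "no_" prefix
# become False, keys with the "-" prefix become None, and "key=val" pairs are
# kept verbatim; duplicate keys raise errors.ParameterError. The key names
# below are arbitrary examples:
#
#   _SplitKeyVal("backend", "memory=512,no_auto_balance,-kernel_path,acpi")
#   => {'memory': '512', 'auto_balance': False, 'kernel_path': None,
#       'acpi': True}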


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
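
# Editor's note: illustrative only, not in the original source. A value such
# as "0:link=br0,mode=bridged" (e.g. from a hypothetical "--net" use) parses
# into ("0", {"link": "br0", "mode": "bridged"}); the removal forms "no_0"
# and "-0" yield ("0", False) and ("0", None) respectively.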


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
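
# Editor's note: a hedged usage sketch, not part of the original module, of
# the custom option types registered above. The option name and dest below
# are hypothetical. A "keyval" option parses "a=1,no_b" via check_key_val,
# while a "unit" option would accept suffixed sizes such as "512M" or "4G"
# (converted to MiB by utils.ParseUnit):
#
#   _EXAMPLE_OPT = cli_option("--example", dest="example", type="keyval",
#                             default={}, help="Example key=val option")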


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary IP for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the MAC address prefix for the"
                            " instances, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode(s) processing")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
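
# Editor's note: an illustrative sketch of the "commands" dictionary shape,
# inferred from the unpacking above and not taken from the original file.
# Each value is a 5-tuple of (function, argument definition, extra options,
# usage string, description), e.g.:
#
#   commands = {
#     "list-tags": (ListTags, ARGS_NONE, [], "",
#                   "List the tags of the given object"),
#     }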


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
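
# Editor's note: illustrative only, not in the original source. With
# args_def=ARGS_ONE_NODE (exactly one node expected), an empty args list
# prints "Error: Command info expects 1 argument(s)" and returns False,
# while a single-element args list returns True:
#
#   _CheckArguments("info", ARGS_ONE_NODE, [])          # => False
#   _CheckArguments("info", ARGS_ONE_NODE, ["node1"])   # => True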


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
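
# Editor's note: illustrative only. SplitNodeOption("node1.example.com:node2")
# yields the pair ["node1.example.com", "node2"], while a value without a
# colon yields (value, None); callers unpack either form as (pnode, snode).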


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
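
# Editor's note: illustrative only. CalculateOSNames("debootstrap",
# ["squeeze", "lenny"]) returns ["debootstrap+squeeze", "debootstrap+lenny"];
# with no variants it returns just ["debootstrap"].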


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
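
# Editor's note: illustrative only; the field names are arbitrary examples.
# ParseFields(None, ["name", "status"]) returns the defaults unchanged,
# ParseFields("+oper_ram", ["name", "status"]) appends to them
# (["name", "status", "oper_ram"]), and ParseFields("name,os", ["name"])
# replaces them entirely (["name", "os"]).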


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
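
# Editor's note: a hedged usage sketch, not part of the original module. A
# caller might ask a three-way question like this; the last entry ('c' here)
# doubles as the default answer when no tty is available:
#
#   choices = [("r", "repair", "Repair the disks"),
#              ("i", "ignore", "Ignore the problem"),
#              ("c", "cancel", "Cancel the operation")]
#   choice = AskUser("Degraded disks found; repair, ignore or cancel?",
#                    choices)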


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit a job (a list of opcodes) without waiting for the
  results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1425

    
1426

    
1427
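# Editor's illustrative sketch: GenericPollJob is normally driven through
# PollJob below, which wires it up roughly as follows ("cl" and "job_id" are
# assumed to exist already):
#
#   result = GenericPollJob(job_id, _LuxiJobPollCb(cl), StdioJobPollReportCb())

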
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


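# Editor's illustrative sketch (not part of the module itself): a custom
# reporting callback that collects formatted log messages in a list instead of
# printing them; it could be passed as the "reporter" argument of PollJob.
#
#   class _CollectingReportCb(JobPollReportCbBase):
#     def __init__(self):
#       JobPollReportCbBase.__init__(self)
#       self.messages = []
#
#     def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
#       self.messages.append(FormatLogMessage(log_type, log_msg))
#
#     def ReportNotChanged(self, job_id, status):
#       pass

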
class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


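# Editor's illustrative sketch: polling a previously submitted job with a
# custom feedback function; each message delivered to it is a
# (timestamp, log_type, log_msg) tuple (see FeedbackFnJobPollReportCb).
# "_PrintFeedback" is a hypothetical helper.
#
#   def _PrintFeedback(msg):
#     (_, _, text) = msg
#     ToStdout("%s", text)
#
#   results = PollJob(job_id, cl=cl, feedback_fn=_PrintFeedback)

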
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the job submission and polling
  functions (L{SendJob} and L{PollJob}). It should be extended to better
  handle feedback and interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


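# Editor's illustrative sketch: a typical gnt-* command implementation
# delegates to SubmitOrSend so that "--submit" works transparently; the
# JobSubmittedException raised in that case is turned into a printed job ID
# and exit code 0 by GenericMain/FormatError. "OpSomething" is a placeholder,
# not a real opcode.
#
#   def SomeCommand(opts, args):
#     op = opcodes.OpSomething(...)
#     SubmitOrSend(op, opts)
#     return 0

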
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    op.debug_level = options.debug


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


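# Editor's illustrative sketch: FormatError is intended for top-level error
# handlers, roughly like this ("DoSomething" is a hypothetical CLI operation):
#
#   try:
#     DoSomething()
#   except errors.GenericError, err:
#     (retcode, message) = FormatError(err)
#     ToStderr(message)
#     sys.exit(retcode)

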
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result


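# Editor's illustrative sketch of how a gnt-* script calls GenericMain; the
# exact layout of the "commands" values follows the design doc for command
# line handling and is only hinted at here, with "SomeCommand",
# "SOME_ARGS_SPEC" and "SOME_OPTS" as placeholders.
#
#   commands = {
#     "list": (SomeCommand, SOME_ARGS_SPEC, SOME_OPTS, "", "Lists things"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))

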
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    try:
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
    nics = [{}] * nic_max
    for nidx, ndict in opts.nics:
      nidx = int(nidx)
      if not isinstance(ndict, dict):
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
        raise errors.OpPrereqError(msg)
      nics[nidx] = ndict
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


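# Editor's illustrative sketch: instance-related scripts typically wrap this
# helper in a thin command function, e.g. (function name hypothetical):
#
#   def AddInstance(opts, args):
#     return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)

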
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management.

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


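# Editor's illustrative sketch: the called function receives the (private)
# helper instance as its first argument, followed by *args, and runs while
# all daemons are down ("_DoMaintenance" is a hypothetical callback).
#
#   def _DoMaintenance(helper, some_arg):
#     helper.feedback_fn("Running maintenance with %s" % some_arg)
#
#   RunWhileClusterStopped(ToStdout, _DoMaintenance, "some value")

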
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


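# Editor's illustrative sketch: building a small two-column table with the
# "smart" width algorithm (separator=None); the data values are made up.
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   fields = ["name", "dfree"]
#   data = [["node1.example.com", 1024], ["node2.example.com", 204800]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)

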
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


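# Editor's illustrative example (the date part depends on the local timezone):
#
#   FormatTimestamp((1234567890, 123456))
#   # -> "2009-02-13 23:31:30.123456" when running under UTC

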
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value


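# Editor's illustrative examples:
#
#   ParseTimespec("30m")  # -> 1800
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("90")   # -> 90 (no suffix means seconds)

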
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
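

# Editor's illustrative sketch of the intended JobExecutor usage: queue one
# job per target name, submit them all and wait for the results
# ("opcodes.OpSomething" is a placeholder, not a real opcode class).
#
#   jex = JobExecutor(opts=opts)
#   for name in names:
#     jex.QueueJob(name, opcodes.OpSomething(...))
#   for (success, result) in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)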