Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 19b9ba9a

History | View | Annotate | Download (78.8 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42

    
43
from optparse import (OptionParser, TitledHelpFormatter,
44
                      Option, OptionValueError)
45

    
46

    
47
__all__ = [
48
  # Command line options
49
  "ADD_UIDS_OPT",
50
  "ALLOCATABLE_OPT",
51
  "ALL_OPT",
52
  "AUTO_PROMOTE_OPT",
53
  "AUTO_REPLACE_OPT",
54
  "BACKEND_OPT",
55
  "CLEANUP_OPT",
56
  "CLUSTER_DOMAIN_SECRET_OPT",
57
  "CONFIRM_OPT",
58
  "CP_SIZE_OPT",
59
  "DEBUG_OPT",
60
  "DEBUG_SIMERR_OPT",
61
  "DISKIDX_OPT",
62
  "DISK_OPT",
63
  "DISK_TEMPLATE_OPT",
64
  "DRAINED_OPT",
65
  "DRY_RUN_OPT",
66
  "DRBD_HELPER_OPT",
67
  "EARLY_RELEASE_OPT",
68
  "ENABLED_HV_OPT",
69
  "ERROR_CODES_OPT",
70
  "FIELDS_OPT",
71
  "FILESTORE_DIR_OPT",
72
  "FILESTORE_DRIVER_OPT",
73
  "FORCE_OPT",
74
  "FORCE_VARIANT_OPT",
75
  "GLOBAL_FILEDIR_OPT",
76
  "HVLIST_OPT",
77
  "HVOPTS_OPT",
78
  "HYPERVISOR_OPT",
79
  "IALLOCATOR_OPT",
80
  "DEFAULT_IALLOCATOR_OPT",
81
  "IDENTIFY_DEFAULTS_OPT",
82
  "IGNORE_CONSIST_OPT",
83
  "IGNORE_FAILURES_OPT",
84
  "IGNORE_REMOVE_FAILURES_OPT",
85
  "IGNORE_SECONDARIES_OPT",
86
  "IGNORE_SIZE_OPT",
87
  "INTERVAL_OPT",
88
  "MAC_PREFIX_OPT",
89
  "MAINTAIN_NODE_HEALTH_OPT",
90
  "MASTER_NETDEV_OPT",
91
  "MC_OPT",
92
  "MIGRATION_MODE_OPT",
93
  "NET_OPT",
94
  "NEW_CLUSTER_CERT_OPT",
95
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
96
  "NEW_CONFD_HMAC_KEY_OPT",
97
  "NEW_RAPI_CERT_OPT",
98
  "NEW_SECONDARY_OPT",
99
  "NIC_PARAMS_OPT",
100
  "NODE_LIST_OPT",
101
  "NODE_PLACEMENT_OPT",
102
  "NODRBD_STORAGE_OPT",
103
  "NOHDR_OPT",
104
  "NOIPCHECK_OPT",
105
  "NO_INSTALL_OPT",
106
  "NONAMECHECK_OPT",
107
  "NOLVM_STORAGE_OPT",
108
  "NOMODIFY_ETCHOSTS_OPT",
109
  "NOMODIFY_SSH_SETUP_OPT",
110
  "NONICS_OPT",
111
  "NONLIVE_OPT",
112
  "NONPLUS1_OPT",
113
  "NOSHUTDOWN_OPT",
114
  "NOSTART_OPT",
115
  "NOSSH_KEYCHECK_OPT",
116
  "NOVOTING_OPT",
117
  "NWSYNC_OPT",
118
  "ON_PRIMARY_OPT",
119
  "ON_SECONDARY_OPT",
120
  "OFFLINE_OPT",
121
  "OSPARAMS_OPT",
122
  "OS_OPT",
123
  "OS_SIZE_OPT",
124
  "RAPI_CERT_OPT",
125
  "READD_OPT",
126
  "REBOOT_TYPE_OPT",
127
  "REMOVE_INSTANCE_OPT",
128
  "REMOVE_UIDS_OPT",
129
  "RESERVED_LVS_OPT",
130
  "ROMAN_OPT",
131
  "SECONDARY_IP_OPT",
132
  "SELECT_OS_OPT",
133
  "SEP_OPT",
134
  "SHOWCMD_OPT",
135
  "SHUTDOWN_TIMEOUT_OPT",
136
  "SINGLE_NODE_OPT",
137
  "SRC_DIR_OPT",
138
  "SRC_NODE_OPT",
139
  "SUBMIT_OPT",
140
  "STATIC_OPT",
141
  "SYNC_OPT",
142
  "TAG_SRC_OPT",
143
  "TIMEOUT_OPT",
144
  "UIDPOOL_OPT",
145
  "USEUNITS_OPT",
146
  "USE_REPL_NET_OPT",
147
  "VERBOSE_OPT",
148
  "VG_NAME_OPT",
149
  "YES_DOIT_OPT",
150
  # Generic functions for CLI programs
151
  "GenericMain",
152
  "GenericInstanceCreate",
153
  "GetClient",
154
  "GetOnlineNodes",
155
  "JobExecutor",
156
  "JobSubmittedException",
157
  "ParseTimespec",
158
  "RunWhileClusterStopped",
159
  "SubmitOpCode",
160
  "SubmitOrSend",
161
  "UsesRPC",
162
  # Formatting functions
163
  "ToStderr", "ToStdout",
164
  "FormatError",
165
  "GenerateTable",
166
  "AskUser",
167
  "FormatTimestamp",
168
  "FormatLogMessage",
169
  # Tags functions
170
  "ListTags",
171
  "AddTags",
172
  "RemoveTags",
173
  # command line options support infrastructure
174
  "ARGS_MANY_INSTANCES",
175
  "ARGS_MANY_NODES",
176
  "ARGS_NONE",
177
  "ARGS_ONE_INSTANCE",
178
  "ARGS_ONE_NODE",
179
  "ARGS_ONE_OS",
180
  "ArgChoice",
181
  "ArgCommand",
182
  "ArgFile",
183
  "ArgHost",
184
  "ArgInstance",
185
  "ArgJobId",
186
  "ArgNode",
187
  "ArgOs",
188
  "ArgSuggest",
189
  "ArgUnknown",
190
  "OPT_COMPL_INST_ADD_NODES",
191
  "OPT_COMPL_MANY_NODES",
192
  "OPT_COMPL_ONE_IALLOCATOR",
193
  "OPT_COMPL_ONE_INSTANCE",
194
  "OPT_COMPL_ONE_NODE",
195
  "OPT_COMPL_ONE_OS",
196
  "cli_option",
197
  "SplitNodeOption",
198
  "CalculateOSNames",
199
  "ParseFields",
200
  ]
201

    
202
NO_PREFIX = "no_"
203
UN_PREFIX = "-"
204

    
205

    
206
class _Argument:
207
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
208
    self.min = min
209
    self.max = max
210

    
211
  def __repr__(self):
212
    return ("<%s min=%s max=%s>" %
213
            (self.__class__.__name__, self.min, self.max))
214

    
215

    
216
class ArgSuggest(_Argument):
  """Argument with a list of suggested values.

  Value can be any of the ones passed to the constructor; the choices
  are suggestions only, not enforced.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    cls_name = self.__class__.__name__
    return ("<%s min=%s max=%s choices=%r>" %
            (cls_name, self.min, self.max, self.choices))
230

    
231

    
232
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
  # no extra behavior here; the distinction from ArgSuggest is made by
  # whatever consumes the argument description
239

    
240

    
241
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
  # marker subclass only; inherits min/max handling from _Argument
245

    
246

    
247
class ArgInstance(_Argument):
  """Instances argument.

  """
  # marker subclass only; inherits min/max handling from _Argument
251

    
252

    
253
class ArgNode(_Argument):
  """Node argument.

  """
  # marker subclass only; inherits min/max handling from _Argument
257

    
258
class ArgJobId(_Argument):
  """Job ID argument.

  """
  # marker subclass only; inherits min/max handling from _Argument
262

    
263

    
264
class ArgFile(_Argument):
  """File path argument.

  """
  # marker subclass only; inherits min/max handling from _Argument
268

    
269

    
270
class ArgCommand(_Argument):
  """Command argument.

  """
  # marker subclass only; inherits min/max handling from _Argument
274

    
275

    
276
class ArgHost(_Argument):
  """Host argument.

  """
  # marker subclass only; inherits min/max handling from _Argument
280

    
281

    
282
class ArgOs(_Argument):
  """OS argument.

  """
  # marker subclass only; inherits min/max handling from _Argument
286

    
287

    
288
ARGS_NONE = []
289
ARGS_MANY_INSTANCES = [ArgInstance()]
290
ARGS_MANY_NODES = [ArgNode()]
291
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
292
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
293
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
294

    
295

    
296
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter: for node and
  instance tags the object name is popped from the front of args.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so the kind doubles as the name
    return kind, kind
  if kind in (constants.TAG_NODE, constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
315

    
316

    
317
def _ExtendTags(opts, args):
318
  """Extend the args if a source file has been given.
319

320
  This function will extend the tags with the contents of the file
321
  passed in the 'tags_source' attribute of the opts parameter. A file
322
  named '-' will be replaced by stdin.
323

324
  """
325
  fname = opts.tags_source
326
  if fname is None:
327
    return
328
  if fname == "-":
329
    new_fh = sys.stdin
330
  else:
331
    new_fh = open(fname, "r")
332
  new_data = []
333
  try:
334
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
335
    # because of python bug 1633941
336
    while True:
337
      line = new_fh.readline()
338
      if not line:
339
        break
340
      new_data.append(line.strip())
341
  finally:
342
    new_fh.close()
343
  args.extend(new_data)
344

    
345

    
346
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  # print the tags sorted, one per line
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
362

    
363

    
364
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tags from opts.tags_source, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  SubmitOpCode(opcodes.OpAddTags(kind=kind, name=name, tags=args))
379

    
380

    
381
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull additional tags from opts.tags_source, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  SubmitOpCode(opcodes.OpDelTags(kind=kind, name=name, tags=args))
396

    
397

    
398
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  @param option: the option instance being processed (unused)
  @param opt: the option name, used only in error messages
  @param value: the string to convert, e.g. "512" or "4G"
  @return: the numeric value returned by L{utils.ParseUnit}
      (presumably MiB, as the option help texts suggest -- verify)
  @raise OptionValueError: if the value cannot be parsed as a unit

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
406

    
407

    
408
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if not data:
    return kv_dict
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      key, val = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      # "no_foo" means foo=False
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      # "-foo" means foo=None (reset to default)
      key, val = elem[len(UN_PREFIX):], None
    else:
      # bare "foo" means foo=True
      key, val = elem, True
    if key in kv_dict:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    kv_dict[key] = val
  return kv_dict
443

    
444

    
445
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  # an ident prefixed with "no_" or "-" removes/resets the whole group,
  # in which case no key=val options may follow
  for prefix, flag in ((NO_PREFIX, False), (UN_PREFIX, None)):
    if ident.startswith(prefix):
      if rest:
        msg = "Cannot pass options when removing parameter groups: %s" % value
        raise errors.ParameterError(msg)
      return (ident[len(prefix):], flag)

  return (ident, _SplitKeyVal(opt, rest))
471

    
472

    
473
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the option instance being processed (unused)
  @param opt: the option name, passed through for error messages
  @param value: the key=val,key=val string to parse

  """
  return _SplitKeyVal(opt, value)
480

    
481

    
482
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
495

    
496

    
497
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
OPT_COMPL_MANY_NODES = 100
OPT_COMPL_ONE_NODE = 101
OPT_COMPL_ONE_INSTANCE = 102
OPT_COMPL_ONE_OS = 103
OPT_COMPL_ONE_IALLOCATOR = 104
OPT_COMPL_INST_ADD_NODES = 105

# set of all dynamic-completion markers
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  ])
514

    
515

    
516
class CliOption(Option):
  """Custom option class for optparse.

  Extends the standard optparse Option with an extra attribute for
  completion suggestions and the additional value types implemented
  in this module.

  """
  # extra attribute carrying completion suggestions (a list, or one of
  # the OPT_COMPL_* markers for dynamic completion)
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # additional value types accepted via type="..."
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  # map each new type to its converter function defined above
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
538

    
539

    
540
_YORNO = "yes|no"
541

    
542
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
543
                       help="Increase debugging level")
544

    
545
NOHDR_OPT = cli_option("--no-headers", default=False,
546
                       action="store_true", dest="no_headers",
547
                       help="Don't display column headers")
548

    
549
SEP_OPT = cli_option("--separator", default=None,
550
                     action="store", dest="separator",
551
                     help=("Separator between output fields"
552
                           " (defaults to one space)"))
553

    
554
USEUNITS_OPT = cli_option("--units", default=None,
555
                          dest="units", choices=('h', 'm', 'g', 't'),
556
                          help="Specify units for output (one of hmgt)")
557

    
558
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
559
                        type="string", metavar="FIELDS",
560
                        help="Comma separated list of output fields")
561

    
562
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
563
                       default=False, help="Force the operation")
564

    
565
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
566
                         default=False, help="Do not require confirmation")
567

    
568
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
569
                         default=None, help="File with tag names")
570

    
571
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
572
                        default=False, action="store_true",
573
                        help=("Submit the job and return the job ID, but"
574
                              " don't wait for the job to finish"))
575

    
576
SYNC_OPT = cli_option("--sync", dest="do_locking",
577
                      default=False, action="store_true",
578
                      help=("Grab locks while doing the queries"
579
                            " in order to ensure more consistent results"))
580

    
581
# Run only the pre-execution checks, not the operation itself.
# Fixed help-text typo: "verify it it" -> "verify if it".
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
586

    
587
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
588
                         action="store_true",
589
                         help="Increase the verbosity of the operation")
590

    
591
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
592
                              action="store_true", dest="simulate_errors",
593
                              help="Debugging option that makes the operation"
594
                              " treat most runtime checks as failed")
595

    
596
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
597
                        default=True, action="store_false",
598
                        help="Don't wait for sync (DANGEROUS!)")
599

    
600
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
601
                               help="Custom disk setup (diskless, file,"
602
                               " plain or drbd)",
603
                               default=None, metavar="TEMPL",
604
                               choices=list(constants.DISK_TEMPLATES))
605

    
606
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
607
                        help="Do not create any network cards for"
608
                        " the instance")
609

    
610
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
611
                               help="Relative path under default cluster-wide"
612
                               " file storage dir to store file-based disks",
613
                               default=None, metavar="<DIR>")
614

    
615
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
616
                                  help="Driver to use for image files",
617
                                  default="loop", metavar="<DRIVER>",
618
                                  choices=list(constants.FILE_DRIVER))
619

    
620
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
621
                            help="Select nodes for the instance automatically"
622
                            " using the <NAME> iallocator plugin",
623
                            default=None, type="string",
624
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
625

    
626
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
627
                            metavar="<NAME>",
628
                            help="Set the default instance allocator plugin",
629
                            default=None, type="string",
630
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
631

    
632
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
633
                    metavar="<os>",
634
                    completion_suggest=OPT_COMPL_ONE_OS)
635

    
636
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
637
                         type="keyval", default={},
638
                         help="OS parameters")
639

    
640
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
641
                               action="store_true", default=False,
642
                               help="Force an unknown variant")
643

    
644
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
645
                            action="store_true", default=False,
646
                            help="Do not install the OS (will"
647
                            " enable no-start)")
648

    
649
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
650
                         type="keyval", default={},
651
                         help="Backend parameters")
652

    
653
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
654
                         default={}, dest="hvparams",
655
                         help="Hypervisor parameters")
656

    
657
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
658
                            help="Hypervisor and hypervisor options, in the"
659
                            " format hypervisor:option=value,option=value,...",
660
                            default=None, type="identkeyval")
661

    
662
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
663
                        help="Hypervisor and hypervisor options, in the"
664
                        " format hypervisor:option=value,option=value,...",
665
                        default=[], action="append", type="identkeyval")
666

    
667
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
668
                           action="store_false",
669
                           help="Don't check that the instance's IP"
670
                           " is alive")
671

    
672
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
673
                             default=True, action="store_false",
674
                             help="Don't check that the instance's name"
675
                             " is resolvable")
676

    
677
NET_OPT = cli_option("--net",
678
                     help="NIC parameters", default=[],
679
                     dest="nics", action="append", type="identkeyval")
680

    
681
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
682
                      dest="disks", action="append", type="identkeyval")
683

    
684
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
685
                         help="Comma-separated list of disks"
686
                         " indices to act on (e.g. 0,2) (optional,"
687
                         " defaults to all disks)")
688

    
689
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
690
                         help="Enforces a single-disk configuration using the"
691
                         " given disk size, in MiB unless a suffix is used",
692
                         default=None, type="unit", metavar="<size>")
693

    
694
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
695
                                dest="ignore_consistency",
696
                                action="store_true", default=False,
697
                                help="Ignore the consistency of the disks on"
698
                                " the secondary")
699

    
700
NONLIVE_OPT = cli_option("--non-live", dest="live",
701
                         default=True, action="store_false",
702
                         help="Do a non-live migration (this usually means"
703
                         " freeze the instance, save the state, transfer and"
704
                         " only then resume running on the secondary node)")
705

    
706
# Override the hypervisor's default migration mode.
# Fixed unbalanced parenthesis in the help text.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
711

    
712
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
713
                                help="Target node and optional secondary node",
714
                                metavar="<pnode>[:<snode>]",
715
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
716

    
717
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
718
                           action="append", metavar="<node>",
719
                           help="Use only this node (can be used multiple"
720
                           " times, if not given defaults to all nodes)",
721
                           completion_suggest=OPT_COMPL_ONE_NODE)
722

    
723
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
724
                             metavar="<node>",
725
                             completion_suggest=OPT_COMPL_ONE_NODE)
726

    
727
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
728
                         action="store_false",
729
                         help="Don't start the instance after creation")
730

    
731
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
732
                         action="store_true", default=False,
733
                         help="Show command instead of executing it")
734

    
735
# Recover from a failed migration cleanup instead of migrating.
# Fixed help text: removed doubled space ("and  disrupt") and closed the
# unbalanced parenthesis ("(like during the migration").
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")
743

    
744
STATIC_OPT = cli_option("-s", "--static", dest="static",
745
                        action="store_true", default=False,
746
                        help="Only show configuration data, not runtime data")
747

    
748
ALL_OPT = cli_option("--all", dest="show_all",
749
                     default=False, action="store_true",
750
                     help="Show info on all instances on the cluster."
751
                     " This can take a long time to run, use wisely")
752

    
753
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
754
                           action="store_true", default=False,
755
                           help="Interactive OS reinstall, lists available"
756
                           " OS templates for selection")
757

    
758
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
759
                                 action="store_true", default=False,
760
                                 help="Remove the instance from the cluster"
761
                                 " configuration even if there are failures"
762
                                 " during the removal process")
763

    
764
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
765
                                        dest="ignore_remove_failures",
766
                                        action="store_true", default=False,
767
                                        help="Remove the instance from the"
768
                                        " cluster configuration even if there"
769
                                        " are failures during the removal"
770
                                        " process")
771

    
772
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
773
                                 action="store_true", default=False,
774
                                 help="Remove the instance from the cluster")
775

    
776
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
777
                               help="Specifies the new secondary node",
778
                               metavar="NODE", default=None,
779
                               completion_suggest=OPT_COMPL_ONE_NODE)
780

    
781
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
782
                            default=False, action="store_true",
783
                            help="Replace the disk(s) on the primary"
784
                            " node (only for the drbd template)")
785

    
786
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
787
                              default=False, action="store_true",
788
                              help="Replace the disk(s) on the secondary"
789
                              " node (only for the drbd template)")
790

    
791
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
792
                              default=False, action="store_true",
793
                              help="Lock all nodes and auto-promote as needed"
794
                              " to MC status")
795

    
796
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
797
                              default=False, action="store_true",
798
                              help="Automatically replace faulty disks"
799
                              " (only for the drbd template)")
800

    
801
# Command-line option constants. Each *_OPT name defined here is exported
# through __all__ at the top of this module; keep both lists in sync.
# Fixed in review: "new new" duplication, "repetions" typo, and a double
# space in the master-netdev help text.

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

# NOTE(review): "-C" is also used by CP_SIZE_OPT below; the two are used by
# different commands, so they never clash in a single parser.
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

# NOTE(review): default=None here (not False as for the sibling store_true
# options); both are falsy, kept as-is in case callers test "is None".
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")
1031

    
1032

    
1033
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # Derive the program name (for usage messages) from argv[0]
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  # No command given, or an unknown command: print a usage summary and
  # the full command list, then signal failure via (None, None, None)
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  # commands[cmd] is a 5-tuple: (function, argument definition,
  # command-specific options, usage string, description)
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + [DEBUG_OPT],
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # NOTE(review): parse_args() is called without arguments, so optparse
  # reads sys.argv directly (the command word was popped from the caller's
  # argv, which aliases sys.argv) -- TODO confirm this is intentional
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1110

    
1111

    
1112
def _CheckArguments(cmd, args_def, args):
1113
  """Verifies the arguments using the argument definition.
1114

1115
  Algorithm:
1116

1117
    1. Abort with error if values specified by user but none expected.
1118

1119
    1. For each argument in definition
1120

1121
      1. Keep running count of minimum number of values (min_count)
1122
      1. Keep running count of maximum number of values (max_count)
1123
      1. If it has an unlimited number of values
1124

1125
        1. Abort with error if it's not the last argument in the definition
1126

1127
    1. If last argument has limited number of values
1128

1129
      1. Abort with error if number of values doesn't match or is too large
1130

1131
    1. Abort with error if user didn't pass enough values (min_count)
1132

1133
  """
1134
  if args and not args_def:
1135
    ToStderr("Error: Command %s expects no arguments", cmd)
1136
    return False
1137

    
1138
  min_count = None
1139
  max_count = None
1140
  check_max = None
1141

    
1142
  last_idx = len(args_def) - 1
1143

    
1144
  for idx, arg in enumerate(args_def):
1145
    if min_count is None:
1146
      min_count = arg.min
1147
    elif arg.min is not None:
1148
      min_count += arg.min
1149

    
1150
    if max_count is None:
1151
      max_count = arg.max
1152
    elif arg.max is not None:
1153
      max_count += arg.max
1154

    
1155
    if idx == last_idx:
1156
      check_max = (arg.max is not None)
1157

    
1158
    elif arg.max is None:
1159
      raise errors.ProgrammerError("Only the last argument can have max=None")
1160

    
1161
  if check_max:
1162
    # Command with exact number of arguments
1163
    if (min_count is not None and max_count is not None and
1164
        min_count == max_count and len(args) != min_count):
1165
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1166
      return False
1167

    
1168
    # Command with limited number of arguments
1169
    if max_count is not None and len(args) > max_count:
1170
      ToStderr("Error: Command %s expects only %d argument(s)",
1171
               cmd, max_count)
1172
      return False
1173

    
1174
  # Command with some required arguments
1175
  if min_count is not None and len(args) < min_count:
1176
    ToStderr("Error: Command %s expects at least %d argument(s)",
1177
             cmd, min_count)
1178
    return False
1179

    
1180
  return True
1181

    
1182

    
1183
def SplitNodeOption(value):
  """Splits a --node option value into its (node, secondary) parts.

  A value of the form "node:rest" is split at the first colon; any
  other value (including None or the empty string) yields (value, None).

  """
  if not value or ':' not in value:
    return (value, None)
  node, _, remainder = value.partition(':')
  return [node, remainder]
1191

    
1192

    
1193
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Without variants the base name is the only valid name
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
1208

    
1209

    
1210
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  # Nothing selected: fall back to the defaults
  if selected is None:
    return default

  # A leading "+" means "append to the defaults" instead of replacing them
  if selected.startswith("+"):
    extra = selected[1:].split(",")
    return default + extra

  return selected.split(",")
1226

    
1227

    
1228
# Alias for rpc.RunWithRPC; presumably used as a decorator that sets up and
# tears down the RPC layer around CLI entry points -- TODO confirm in
# ganeti.rpc
UsesRPC = rpc.RunWithRPC
1229

    
1230

    
1231
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # The last choice doubles as the default answer for non-interactive runs
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # use open() instead of the deprecated file() builtin (same behavior,
    # forward-compatible)
    f = open("/dev/tty", "a+")
  except IOError:
    # no controlling terminal: return the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      # read at most two characters (answer char + newline)
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        # print the description of every choice, then ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1286

    
1287

    
1288
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
  # Raised by SubmitOrSend when the job is only submitted (--submit);
  # carries the job ID as its single argument.
1297

    
1298

    
1299
def SendJob(ops, cl=None):
  """Submits a list of opcodes as one job, without waiting for the result.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the submitted job

  """
  client = cl if cl is not None else GetClient()
  return client.SubmitJob(ops)
1315

    
1316

    
1317
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Loops waiting for job status changes, forwarding log messages to the
  reporting callbacks, until the job reaches a final state; then queries
  the job once more and either returns its result or raises an
  appropriate exception.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the job's opresult list on success
  @raise errors.JobLost: if the job disappears while polling
  @raise errors.OpExecError: if the job is canceled or any opcode fails

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE: max(None, serial) relies on Python 2 comparison semantics
        # (None sorts below any integer)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: this "elif" pairs with "if log_entries" above -- the loop only
    # breaks on a final status once all pending log entries are drained
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # Job reached a final state: fetch the full outcome
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Find the first failed opcode and report whether any opcode before it
  # succeeded (partial failure)
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1393

    
1394

    
1395
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # Abstract data-access interface; concrete subclasses (e.g.
  # _LuxiJobPollCb) must override both methods below.
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
1421

    
1422

    
1423
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # Abstract reporting interface; concrete subclasses (e.g.
  # FeedbackFnJobPollReportCb, StdioJobPollReportCb) must override both
  # methods below.
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1448

    
1449

    
1450
class _LuxiJobPollCb(JobPollCbBase):
  # JobPollCbBase implementation that delegates every call to a luxi client
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
1471

    
1472

    
1473
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporter that forwards log messages to a caller-supplied callback and
  # ignores "not changed" notifications
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with a (timestamp, log_type,
        log_msg) tuple for every job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
1495

    
1496

    
1497
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporter printing job progress to standard output/error
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # one-shot flags so each notification is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a timestamped job log message on stdout.

    """
    when = time.ctime(utils.MergeTime(timestamp))
    ToStdout("%s %s", when, FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Notifies (once per state) that a job is queued or waiting for locks.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED:
      if not self.notified_queued:
        ToStderr("Job %s is waiting in queue", job_id)
        self.notified_queued = True
    elif status == constants.JOB_STATUS_WAITLOCK:
      if not self.notified_waitlock:
        ToStderr("Job %s is trying to acquire all necessary locks", job_id)
        self.notified_waitlock = True
1528

    
1529

    
1530
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries are stringified first; the result is always passed
  through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
1538

    
1539

    
1540
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Polls for the result of a job via luxi.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: optional callback for log messages (mutually
      exclusive with C{reporter})
  @param reporter: optional L{JobPollReportCbBase} instance

  """
  if cl is None:
    cl = GetClient()

  # Exactly one reporting mechanism may be active
  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1562

    
1563

    
1564
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  Applies the generic command-line options to the opcode, submits it as
  a single-opcode job and polls until completion.

  @param op: the opcode to run
  @param cl: luxi client (a new one is created when None)
  @param feedback_fn: optional callback for job log messages
  @param opts: parsed command-line options, applied via
      L{SetGenericOpcodeOpts}
  @param reporter: optional polling reporter (see L{PollJob})
  @return: the result of the job's single opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  results = PollJob(SendJob([op], cl=cl), cl=cl,
                    feedback_fn=feedback_fn, reporter=reporter)

  return results[0]
1583

    
1584

    
1585
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  # Normal path: run synchronously and return the opcode result
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  # --submit path: fire and signal the job ID via an exception
  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
1604

    
1605

    
1606
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # dry_run is only propagated when the options object actually has it
  has_dry_run = hasattr(options, "dry_run")
  for opcode in opcode_list:
    if has_dry_run:
      opcode.dry_run = options.dry_run
    opcode.debug_level = options.debug
1623

    
1624

    
1625
def GetClient():
  """Returns a luxi client connected to the local master daemon.

  @raise errors.OpPrereqError: if the master daemon is unreachable and
      either the cluster is not initialized or this node is not the
      master
  @raise luxi.NoMasterError: re-raised if this node is the master but
      the master daemon could not be contacted

  """
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    # Connection failed; figure out why, to give a better error message
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    # We are the master, so the daemon itself is at fault: re-raise
    raise
  return client
1646

    
1647

    
1648
def FormatError(err):
  """Turn an exception into a (retcode, message) pair.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  Note: the isinstance checks are ordered from most to least specific
  (e.g. C{errors.GenericError} must come after its subclasses).

  """
  buf = StringIO()
  retcode = 1
  text = str(err)
  if isinstance(err, errors.ConfigurationError):
    line = "Corrupt configuration file: %s" % text
    logging.error(line)
    buf.write(line + "\n")
    buf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    buf.write("Failure: hooks execution failed:\n")
    for (node, script, out) in err.args[0]:
      if out:
        buf.write("  node: %s, script: %s, output: %s\n" %
                  (node, script, out))
      else:
        buf.write("  node: %s, script: %s (no output)\n" %
                  (node, script))
  elif isinstance(err, errors.HooksFailure):
    buf.write("Failure: hooks general failure: %s" % text)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.HostInfo.SysName()
    if err.args[0] == this_host:
      fmt = "Failure: can't resolve my own hostname ('%s')"
    else:
      fmt = "Failure: can't resolve hostname '%s'"
    buf.write(fmt % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      buf.write("Failure: prerequisites not met for this"
                " operation:\nerror type: %s, error details:\n%s" %
                (err.args[1], err.args[0]))
    else:
      buf.write("Failure: prerequisites not met for this"
                " operation:\n%s" % text)
  elif isinstance(err, errors.OpExecError):
    buf.write("Failure: command execution error:\n%s" % text)
  elif isinstance(err, errors.TagError):
    buf.write("Failure: invalid tag(s) given:\n%s" % text)
  elif isinstance(err, errors.JobQueueDrainError):
    buf.write("Failure: the job queue is marked for drain and doesn't"
              " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    buf.write("Failure: the job queue is full and doesn't accept new"
              " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    buf.write("Parameter Error: %s" % text)
  elif isinstance(err, errors.ParameterError):
    buf.write("Failure: unknown/wrong parameter name '%s'" % text)
  elif isinstance(err, luxi.NoMasterError):
    buf.write("Cannot communicate with the master daemon.\nIs it running"
              " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    buf.write("Timeout while talking to the master daemon. Error:\n"
              "%s" % text)
  elif isinstance(err, luxi.PermissionError):
    buf.write("It seems you don't have permissions to connect to the"
              " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    buf.write("Unhandled protocol error while talking to the master daemon:\n"
              "%s" % text)
  elif isinstance(err, errors.JobLost):
    buf.write("Error checking job status: %s" % text)
  elif isinstance(err, errors.GenericError):
    buf.write("Unhandled Ganeti error: %s" % text)
  elif isinstance(err, JobSubmittedException):
    buf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    buf.write("Unhandled exception: %s" % text)
  return retcode, buf.getvalue().rstrip('\n')
1728

    
1729

    
1730
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # Save the program name and the entire command line for later logging
  if sys.argv:
    progname = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      # Include the sub-command in the logged program name
      progname += " " + sys.argv[1]
      cmdline = " ".join(sys.argv[2:])
    else:
      cmdline = ""
  else:
    progname = "<unknown program>"
    cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError as err:
    (_, err_msg) = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for (key, val) in override.items():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=progname)

  if cmdline:
    logging.info("run with arguments '%s'", cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException) as err:
    (result, err_msg) = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
1788

    
1789

    
1790
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  @raise errors.OpPrereqError: if the disk/NIC specifications are invalid
  @raise errors.ProgrammerError: if C{mode} is not one of the two
      supported creation modes

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    try:
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
    except ValueError as err:
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
    # Note: this must NOT be "[{}] * nic_max", which would put the *same*
    # dict object in every slot; mutating one unfilled NIC would then
    # silently mutate all of them
    nics = [{} for _ in range(nic_max)]
    for nidx, ndict in opts.nics:
      nidx = int(nidx)
      if not isinstance(ndict, dict):
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
        raise errors.OpPrereqError(msg)
      nics[nidx] = ndict
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      # Translate the single-disk shortcut into the generic disk list form
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      # As for NICs above, each slot needs its own dict object
      disks = [{} for _ in range(disk_max)]
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        # Size is determined later from the adopted volume
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0
1927

    
1928

    
1929
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; the master is handled separately
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None (or the master's name) means
        running the command locally
    @type cmd: list
    @param cmd: Command

    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if not result.failed:
      return

    parts = ["Failed to run command %s" % result.cmd]
    if node_name:
      parts.append("on node %s" % node_name)
    parts.append(": exitcode %s and error %s" %
                 (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(parts))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called; it receives this helper instance
        as its first argument, followed by C{args}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          (_, errmsg) = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2022

    
2023

    
2024
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
2050

    
2051

    
2052
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  Note: the rows in C{data} are modified in place (cells are converted
  to strings, unit fields are reformatted).

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    units = "m" if separator else "h"

  numfields = utils.FieldSet(*(numfields or []))   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*(unitfields or [])) # pylint: disable-msg=W0142

  cell_formats = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      cell_formats.append("%s")
    elif numfields.Matches(field):
      cell_formats.append("%*s")
    else:
      cell_formats.append("%-*s")

  if separator is None:
    widths = [0] * len(fields)
    line_fmt = ' '.join(cell_formats)
  else:
    line_fmt = separator.replace("%", "%%").join(cell_formats)

  # Stringify cells in place, converting unit fields; track the widest
  # value per column when alignment is needed
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        widths[idx] = max(widths[idx], len(val))

  result = []
  if headers:
    hdr_args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        widths[idx] = max(widths[idx], len(hdr))
        hdr_args.append(widths[idx])
      hdr_args.append(hdr)
    result.append(line_fmt % tuple(hdr_args))

  if separator is None:
    assert len(widths) == len(fields)

    # Don't pad the last column unless it's right-aligned
    if fields and not numfields.Matches(fields[-1]):
      widths[-1] = 0

  for line in data:
    row_args = []
    if line is None:
      line = ['-'] * len(fields)
    for idx in range(len(fields)):
      if separator is None:
        row_args.append(widths[idx])
      row_args.append(line[idx])
    result.append(line_fmt % tuple(row_args))

  return result
2158

    
2159

    
2160
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or '?' if the input
      is not a (seconds, microseconds) pair

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  (sec, usec) = ts
  return "%s.%06d" % (time.strftime("%F %T", time.localtime(sec)), usec)
2174

    
2175

    
2176
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  multipliers = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  suffix = value[-1]
  if suffix not in multipliers:
    # No suffix: the whole string must be an integer number of seconds
    try:
      return int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)

  digits = value[:-1]
  if not digits: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
                               " suffix passed)")
  try:
    return int(digits) * multipliers[suffix]
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % digits)
2216

    
2217

    
2218
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Column of the query result to return: secondary IP or node name
  name_idx = 2 if secondary_ips else 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    keep_fn = lambda name: name != master_node
  else:
    keep_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result
          if not row[1] and keep_fn(row[0])]
2262

    
2263

    
2264
def _ToStream(stream, txt, *args):
2265
  """Write a message to a stream, bypassing the logging system
2266

2267
  @type stream: file object
2268
  @param stream: the file to which we should write
2269
  @type txt: str
2270
  @param txt: the message
2271

2272
  """
2273
  if args:
2274
    args = tuple(args)
2275
    stream.write(txt % args)
2276
  else:
2277
    stream.write(txt)
2278
  stream.write('\n')
2279
  stream.flush()
2280

    
2281

    
2282
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-formatting arguments for C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
2292

    
2293

    
2294
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-formatting arguments for C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
2304

    
2305

    
2306
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Jobs recorded but not yet submitted, as (name, ops) pairs
    self.queue = []
    # Submission outcomes, as (index, success, job_id_or_error, name)
    self.jobs = []

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, ops) in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    statuses = self.cl.QueryJobs([entry[2] for entry in self.jobs],
                                 ["status"])
    assert statuses

    waiting_states = (constants.JOB_STATUS_QUEUED,
                      constants.JOB_STATUS_WAITLOCK,
                      constants.JOB_STATUS_CANCELING)
    for (entry, status) in zip(self.jobs, statuses):
      if (isinstance(status, list) and status and
          status[0] in waiting_states):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(entry)
      return entry

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [entry[2] for entry in self.jobs if entry[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    (self.jobs, failures) = compat.partition(self.jobs, lambda x: x[1])
    for (idx, _, jid, name) in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        (_, job_result) = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        (_, job_result) = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    return [entry[1:] for entry in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()

    if not self.jobs:
      self.SubmitPending()
    for (_, status, result, name) in self.jobs:
      if status:
        ToStdout("%s: %s", result, name)
      else:
        ToStderr("Failure for %s: %s", name, result)
    return [entry[1:3] for entry in self.jobs]