Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ e7323b5e

History | View | Annotate | Download (78.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42

    
43
from optparse import (OptionParser, TitledHelpFormatter,
44
                      Option, OptionValueError)
45

    
46

    
47
# Public API of this module: these are the names that scripts doing
# "from ganeti.cli import *" receive.  Keep the sections sorted.
__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  ]
199

    
200
NO_PREFIX = "no_"
201
UN_PREFIX = "-"
202

    
203

    
204
class _Argument:
  """Base class describing a positional argument specification.

  Holds only the minimum and maximum number of values the argument
  accepts (C{max=None} meaning unbounded).

  """
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    """Return a debug representation including the cardinality bounds."""
    return "<%s min=%s max=%s>" % (self.__class__.__name__,
                                   self.min, self.max)
212

    
213

    
214
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    """Return a debug representation including the suggested choices."""
    return "<%s min=%s max=%s choices=%r>" % (self.__class__.__name__,
                                              self.min, self.max,
                                              self.choices)
228

    
229

    
230
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
284

    
285

    
286
ARGS_NONE = []
287
ARGS_MANY_INSTANCES = [ArgInstance()]
288
ARGS_MANY_NODES = [ArgNode()]
289
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
290
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
291
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
292

    
293

    
294
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed options, must carry a C{tag_type} attribute
  @param args: remaining positional arguments; for node/instance tags
      the first element is consumed as the object name
  @return: a (kind, name) tuple

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, its "name" is the kind itself
    return kind, kind
  if kind in (constants.TAG_NODE, constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
313

    
314

    
315
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed options, read for the C{tags_source} attribute
  @param args: list of tags, extended in place with one stripped tag
      per line read from the source file

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # only close what we opened; closing sys.stdin here would break any
    # later interaction (e.g. confirmation prompts) in the same command
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
342

    
343

    
344
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  # print the tags in a stable (sorted) order
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
360

    
361

    
362
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  SubmitOpCode(opcodes.OpAddTags(kind=kind, name=name, tags=args))
377

    
378

    
379
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  SubmitOpCode(opcodes.OpDelTags(kind=kind, name=name, tags=args))
394

    
395

    
396
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  @param option: the C{Option} instance being processed (unused)
  @type opt: string
  @param opt: the option name, used only for error messages
  @type value: string
  @param value: user-supplied size, e.g. "512" or "4G"
  @return: the value converted by L{utils.ParseUnit}
  @raise OptionValueError: if the value cannot be parsed as a unit

  """
  try:
    return utils.ParseUnit(value)
  # re-raised as an optparse error so the parser prints a usage message
  # instead of a traceback
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
404

    
405

    
406
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif elem.startswith(NO_PREFIX):
        # "no_key" means key=False
        key, val = elem[len(NO_PREFIX):], False
      elif elem.startswith(UN_PREFIX):
        # "-key" means "reset key to its default" (None)
        key, val = elem[len(UN_PREFIX):], None
      else:
        # a bare key means key=True
        key, val = elem, True
      if key in parsed:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      parsed[key] = val
  return parsed
441

    
442

    
443
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ''

  # an ident prefixed with "no_" or "-" removes/resets the whole group and
  # therefore cannot carry any key=val payload
  for prefix, marker in ((NO_PREFIX, False), (UN_PREFIX, None)):
    if ident.startswith(prefix):
      if rest:
        msg = "Cannot pass options when removing parameter groups: %s" % value
        raise errors.ParameterError(msg)
      return (ident[len(prefix):], marker)
  return (ident, _SplitKeyVal(opt, rest))
469

    
470

    
471
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the C{Option} instance being processed (unused)
  @type opt: string
  @param opt: the option name, used in error messages
  @type value: string
  @param value: the user-supplied key=val[,...] string
  @rtype: dict

  """
  return _SplitKeyVal(opt, value)
478

    
479

    
480
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
493

    
494

    
495
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES) = range(100, 106)

# All dynamic-completion markers, e.g. for validating completion_suggest
# values.
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  ])
512

    
513

    
514
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse C{Option} with a C{completion_suggest}
  attribute and the extra Ganeti value types (identkeyval, keyval,
  unit, bool), wired to their respective checker functions.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER.update({
    "identkeyval": check_ident_key_val,
    "keyval": check_key_val,
    "unit": check_unit,
    "bool": check_bool,
    })
532

    
533

    
534
# optparse.py sets make_option, so we do it for our own option class, too
535
cli_option = CliOption
536

    
537

    
538
_YORNO = "yes|no"
539

    
540
# Generic output/confirmation/job-submission options shared by most commands.
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
578

    
579
# Dry-run: submit the opcode but only run the check/prereq steps, without
# executing it.  Fixed help-text typo ("verify it it" -> "verify if it").
_DRY_RUN_OPT = cli_option("--dry-run", default=False,
                          action="store_true",
                          help=("Do not execute the operation, just run the"
                                " check steps and verify if it could be"
                                " executed"))
584

    
585
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

# note: stores into "wait_for_sync" inverted (store_false)
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
603

    
604
# Instance creation / OS / hypervisor parameter options.
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

# single "hypervisor:opts" value (e.g. instance add)
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

# appendable list of "hypervisor:opts" values (e.g. cluster init/modify)
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
664

    
665
# Instance check, NIC/disk and migration options.
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
703

    
704
# Explicit migration mode override; fixed the unbalanced parenthesis in the
# user-visible help text ("(choose either live or non-live" lacked ")").
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
709

    
710
# Node selection options (all use -n/--node with different semantics).
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
732

    
733
# Migration cleanup mode; fixed the help text: removed the stray trailing
# space that produced "and  disrupt" and closed the "(like during the
# migration" parenthesis.
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")
741

    
742
# Instance info / reinstall / removal options.
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)
778

    
779
# Disk replacement and import/export/node-add options.
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")
823

    
824

    
825
# Node flag and cluster-wide configuration options.
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
870

    
871
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
872
                            help="Specify the mac prefix for the instance IP"
873
                            " addresses, in the format XX:XX:XX",
874
                            metavar="PREFIX",
875
                            default=None)
876

    
877
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
878
                               help="Specify the node interface (cluster-wide)"
879
                               " on which the master IP address will be added "
880
                               " [%s]" % constants.DEFAULT_BRIDGE,
881
                               metavar="NETDEV",
882
                               default=constants.DEFAULT_BRIDGE)
883

    
884
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
885
                                help="Specify the default directory (cluster-"
886
                                "wide) for storing the file-based disks [%s]" %
887
                                constants.DEFAULT_FILE_STORAGE_DIR,
888
                                metavar="DIR",
889
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
890

    
891
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
892
                                   help="Don't modify /etc/hosts",
893
                                   action="store_false", default=True)
894

    
895
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
896
                                    help="Don't initialize SSH keys",
897
                                    action="store_false", default=True)
898

    
899
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
900
                             help="Enable parseable error messages",
901
                             action="store_true", default=False)
902

    
903
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
904
                          help="Skip N+1 memory redundancy tests",
905
                          action="store_true", default=False)
906

    
907
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
908
                             help="Type of reboot: soft/hard/full",
909
                             default=constants.INSTANCE_REBOOT_HARD,
910
                             metavar="<REBOOT>",
911
                             choices=list(constants.REBOOT_TYPES))
912

    
913
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
914
                                    dest="ignore_secondaries",
915
                                    default=False, action="store_true",
916
                                    help="Ignore errors from secondaries")
917

    
918
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
919
                            action="store_false", default=True,
920
                            help="Don't shutdown the instance (unsafe)")
921

    
922
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
923
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
924
                         help="Maximum time to wait")
925

    
926
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
927
                         dest="shutdown_timeout", type="int",
928
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
929
                         help="Maximum time to wait for instance shutdown")
930

    
931
EARLY_RELEASE_OPT = cli_option("--early-release",
932
                               dest="early_release", default=False,
933
                               action="store_true",
934
                               help="Release the locks on the secondary"
935
                               " node(s) early")
936

    
937
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
938
                                  dest="new_cluster_cert",
939
                                  default=False, action="store_true",
940
                                  help="Generate a new cluster certificate")
941

    
942
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
943
                           default=None,
944
                           help="File containing new RAPI certificate")
945

    
946
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
947
                               default=None, action="store_true",
948
                               help=("Generate a new self-signed RAPI"
949
                                     " certificate"))
950

    
951
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
952
                                    dest="new_confd_hmac_key",
953
                                    default=False, action="store_true",
954
                                    help=("Create a new HMAC key for %s" %
955
                                          constants.CONFD))
956

    
957
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
958
                                       dest="cluster_domain_secret",
959
                                       default=None,
960
                                       help=("Load new new cluster domain"
961
                                             " secret from file"))
962

    
963
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
964
                                           dest="new_cluster_domain_secret",
965
                                           default=False, action="store_true",
966
                                           help=("Create a new cluster domain"
967
                                                 " secret"))
968

    
969
USE_REPL_NET_OPT = cli_option("--use-replication-network",
970
                              dest="use_replication_network",
971
                              help="Whether to use the replication network"
972
                              " for talking to the nodes",
973
                              action="store_true", default=False)
974

    
975
MAINTAIN_NODE_HEALTH_OPT = \
976
    cli_option("--maintain-node-health", dest="maintain_node_health",
977
               metavar=_YORNO, default=None, type="bool",
978
               help="Configure the cluster to automatically maintain node"
979
               " health, by shutting down unknown instances, shutting down"
980
               " unknown DRBD devices, etc.")
981

    
982
IDENTIFY_DEFAULTS_OPT = \
983
    cli_option("--identify-defaults", dest="identify_defaults",
984
               default=False, action="store_true",
985
               help="Identify which saved instance parameters are equal to"
986
               " the current cluster defaults and set them as such, instead"
987
               " of marking them as overridden")
988

    
989
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
990
                         action="store", dest="uid_pool",
991
                         help=("A list of user-ids or user-id"
992
                               " ranges separated by commas"))
993

    
994
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
995
                          action="store", dest="add_uids",
996
                          help=("A list of user-ids or user-id"
997
                                " ranges separated by commas, to be"
998
                                " added to the user-id pool"))
999

    
1000
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1001
                             action="store", dest="remove_uids",
1002
                             help=("A list of user-ids or user-id"
1003
                                   " ranges separated by commas, to be"
1004
                                   " removed from the user-id pool"))
1005

    
1006
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1007
                             action="store", dest="reserved_lvs",
1008
                             help=("A comma-separated list of reserved"
1009
                                   " logical volumes names, that will be"
1010
                                   " ignored by cluster verify"))
1011

    
1012
ROMAN_OPT = cli_option("--roman",
1013
                       dest="roman_integers", default=False,
1014
                       action="store_true",
1015
                       help="Use roman numbers for positive integers")
1016

    
1017
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1018
                             action="store", default=None,
1019
                             help="Specifies usermode helper for DRBD")
1020

    
1021
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1022
                                action="store_false", default=True,
1023
                                help="Disable support for DRBD")
1024

    
1025
PRIMARY_IP_VERSION_OPT = \
1026
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1027
               action="store", dest="primary_ip_version",
1028
               metavar="%d|%d" % (constants.IP4_VERSION,
1029
                                  constants.IP6_VERSION),
1030
               help="Cluster-wide IP version for primary IP")
1031

    
1032

    
1033
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line; argv[0] supplies the binary name and
      argv[1] (removed in place by C{argv.pop(1)}) selects the command
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: a (function, options, arguments) tuple, or (None, None, None)
      when no valid command was selected (usage was printed instead)

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing: print a usage summary with all commands
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # index 4 of a command definition holds its help text
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # NOTE(review): parse_args() is called without arguments, so optparse
  # reads sys.argv[1:]; this only works because callers presumably pass
  # sys.argv itself as 'argv', which the pop(1) above mutated in place --
  # confirm against the callers before changing.
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1110

    
1111

    
1112
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Each element of C{args_def} carries C{min} and C{max} attributes
  describing how many values it accepts; only the last element may be
  unbounded (C{max=None}).

  @param cmd: command name, used only for error messages
  @param args_def: sequence of argument definitions
  @param args: the actual values passed by the user
  @return: True if the arguments match the definition, False otherwise
      (an error message is printed in that case)
  @raise errors.ProgrammerError: if a non-last definition is unbounded

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  # Every definition except the final one must have a bounded maximum
  for spec in args_def[:-1]:
    if spec.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Totals over the definitions; None means "no bound known at all"
  known_mins = [spec.min for spec in args_def if spec.min is not None]
  known_maxs = [spec.max for spec in args_def if spec.max is not None]
  min_count = sum(known_mins) if known_mins else None
  max_count = sum(known_maxs) if known_maxs else None
  # Upper-bound checks only apply when the last definition is bounded
  check_max = bool(args_def) and args_def[-1].max is not None

  num_args = len(args)

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and num_args != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and num_args > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and num_args < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
1181

    
1182

    
1183
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "a:b" is split on the first colon into a
  two-element list; anything else (including None or the empty
  string) yields a (value, None) tuple.

  """
  if not value or ':' not in value:
    return (value, None)
  # Note: this branch returns a list (str.split result), not a tuple
  return value.split(':', 1)
1191

    
1192

    
1193
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # No variants: the bare name is the only valid spelling
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
1208

    
1209

    
1210
# Alias re-exported from the rpc module; presumably a decorator that sets
# up/tears down the RPC layer around the wrapped function -- confirm in
# the rpc module.
UsesRPC = rpc.RunWithRPC
1211

    
1212

    
1213
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # The last choice doubles as the default answer (returned when no tty
  # is available or until the user picks a valid key)
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # Use the controlling terminal directly so stdin/stdout redirection
    # does not interfere (Python 2 'file' builtin)
    f = file("/dev/tty", "a+")
  except IOError:
    # no tty: silently fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # mark the default (last) choice with brackets in the prompt
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      # read at most 2 bytes: the answer character plus its newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        # print the per-choice descriptions, then prompt again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1268

    
1269

    
1270
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
  # Intentionally empty: args[0] carries the job ID
1279

    
1280

    
1281
def SendJob(ops, cl=None):
  """Submits a list of opcodes as one job, without waiting for the result.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the submitted job

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
1297

    
1298

    
1299
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for changes on the given job, forwarding any log
  messages to C{report_cbs}, until the job reaches a final status;
  then queries the final result and either returns it or raises an
  error describing the failure.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the opcode results if the job succeeded
  @raise errors.JobLost: if the job can no longer be found
  @raise errors.OpExecError: if the job failed or was canceled

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE(review): for the first message prev_logmsg_serial is still
        # None; max(None, int) works on Python 2 only (None sorts first)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # Completion is only checked when no new log messages arrived in this
    # iteration (the if/elif coupling above); otherwise we loop again so
    # that all messages are drained before acting on the final status
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # The job reached a final status; fetch the per-opcode outcome
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Find the first failed opcode and report it, noting whether earlier
  # opcodes had succeeded (partial failure)
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1375

    
1376

    
1377
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses supply the actual data access (waiting on and querying
  jobs); the base methods only define the interface.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits once for a change on the given job.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Must be implemented by subclasses.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
1403

    
1404

    
1405
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how log messages and "no change" notifications
  are presented; the base methods only define the interface.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a single job log message.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called when a job hasn't changed in a while.

    Must be implemented by subclasses.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1430

    
1431

    
1432
class _LuxiJobPollCb(JobPollCbBase):
  """Data callbacks for L{GenericPollJob} backed by a luxi client.

  """
  def __init__(self, cl):
    """Stores the luxi client to delegate to.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Delegates the wait to the luxi client.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Delegates the job query to the luxi client.

    """
    return self.cl.QueryJobs(job_ids, fields)
1453

    
1454

    
1455
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Stores the feedback function, which must be callable.

    """
    JobPollReportCbBase.__init__(self)

    assert callable(feedback_fn)

    self.feedback_fn = feedback_fn

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Forwards the message as a (timestamp, type, msg) tuple.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Deliberately ignored: the feedback function only receives log
    # messages
1477

    
1478

    
1479
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print to standard output/error.

  """
  def __init__(self):
    """Initializes the one-shot notification flags.

    """
    JobPollReportCbBase.__init__(self)

    # each wait state is announced at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a timestamped, formatted log message on stdout.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Announces (once per state) why the job is not progressing.

    """
    if status is None:
      return

    if not self.notified_queued and status == constants.JOB_STATUS_QUEUED:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif (not self.notified_waitlock and
          status == constants.JOB_STATUS_WAITLOCK):
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
1510

    
1511

    
1512
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries are stringified first; everything is passed
  through L{utils.SafeEncode} before being returned.

  """
  if log_type == constants.ELOG_MESSAGE:
    return utils.SafeEncode(log_msg)
  return utils.SafeEncode(str(log_msg))
1520

    
1521

    
1522
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: optional callable receiving log message tuples;
      mutually exclusive with C{reporter}
  @param reporter: optional L{JobPollReportCbBase} instance; defaults to
      stdio reporting when neither this nor C{feedback_fn} is given

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1544

    
1545

    
1546
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the single opcode to submit
  @param cl: optional luxi client; created on demand when None
  @param opts: command line options used to set generic opcode flags
  @return: the result of the submitted opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  results = PollJob(SendJob([op], cl=cl), cl=cl,
                    feedback_fn=feedback_fn, reporter=reporter)

  # a single opcode was submitted, so return its (single) result
  return results[0]
1565

    
1566

    
1567
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: when '--submit' was given; carries the
      job ID for the caller to print

  """
  if not (opts and opts.submit_only):
    # Normal path: submit and wait for the result
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
1586

    
1587

    
1588
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for opcode in opcode_list:
    opcode.dry_run = options.dry_run
    opcode.debug_level = options.debug
1604

    
1605

    
1606
def GetClient():
  """Connects to the master daemon and returns a luxi client.

  When no master daemon is reachable, tries to produce a more useful
  error message (uninitialized cluster / wrong node) before falling
  back to re-raising the original L{luxi.NoMasterError}.

  @raise errors.OpPrereqError: if the cluster is not initialized or
      this node is not the master

  """
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    # We are on the master but the daemon is unreachable: re-raise the
    # original NoMasterError (bare 'raise' inside the except block)
    raise
  return client
1627

    
1628

    
1629
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception instance to format
  @rtype: tuple
  @return: (exit code, error message) tuple

  """
  # Defaults: generic failure exit code, message built in a buffer
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE(review): the order of the isinstance checks below matters;
  # errors.GenericError is tested near the end, presumably because the
  # more specific error classes above derive from it -- keep specific
  # classes first when adding new branches
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is expected to be a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish failing to resolve our own name from any other host
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries (details, error type)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: the job was submitted successfully under --submit
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  # Strip the trailing newline so callers control line termination
  return retcode, obuf.getvalue().rstrip('\n')
1709

    
1710

    
1711
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  @return: the exit code of the command, or the (formatted) error code
      if argument parsing or command execution failed

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      # "gnt-foo subcommand" is logged as the program name
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    # A bad parameter always exits with code 1; the formatted message
    # goes to stderr
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # Script-provided values take precedence over the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    # Known error types are formatted nicely; the formatted exit code
    # replaces the command's result
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
1769

    
1770

    
1771
def GenericInstanceCreate(mode, opts, args):
1772
  """Add an instance to the cluster via either creation or import.
1773

1774
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1775
  @param opts: the command line options selected by the user
1776
  @type args: list
1777
  @param args: should contain only one element, the new instance name
1778
  @rtype: int
1779
  @return: the desired exit code
1780

1781
  """
1782
  instance = args[0]
1783

    
1784
  (pnode, snode) = SplitNodeOption(opts.node)
1785

    
1786
  hypervisor = None
1787
  hvparams = {}
1788
  if opts.hypervisor:
1789
    hypervisor, hvparams = opts.hypervisor
1790

    
1791
  if opts.nics:
1792
    try:
1793
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
1794
    except ValueError, err:
1795
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1796
    nics = [{}] * nic_max
1797
    for nidx, ndict in opts.nics:
1798
      nidx = int(nidx)
1799
      if not isinstance(ndict, dict):
1800
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
1801
        raise errors.OpPrereqError(msg)
1802
      nics[nidx] = ndict
1803
  elif opts.no_nics:
1804
    # no nics
1805
    nics = []
1806
  elif mode == constants.INSTANCE_CREATE:
1807
    # default of one nic, all auto
1808
    nics = [{}]
1809
  else:
1810
    # mode == import
1811
    nics = []
1812

    
1813
  if opts.disk_template == constants.DT_DISKLESS:
1814
    if opts.disks or opts.sd_size is not None:
1815
      raise errors.OpPrereqError("Diskless instance but disk"
1816
                                 " information passed")
1817
    disks = []
1818
  else:
1819
    if (not opts.disks and not opts.sd_size
1820
        and mode == constants.INSTANCE_CREATE):
1821
      raise errors.OpPrereqError("No disk information specified")
1822
    if opts.disks and opts.sd_size is not None:
1823
      raise errors.OpPrereqError("Please use either the '--disk' or"
1824
                                 " '-s' option")
1825
    if opts.sd_size is not None:
1826
      opts.disks = [(0, {"size": opts.sd_size})]
1827

    
1828
    if opts.disks:
1829
      try:
1830
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1831
      except ValueError, err:
1832
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
1833
      disks = [{}] * disk_max
1834
    else:
1835
      disks = []
1836
    for didx, ddict in opts.disks:
1837
      didx = int(didx)
1838
      if not isinstance(ddict, dict):
1839
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1840
        raise errors.OpPrereqError(msg)
1841
      elif "size" in ddict:
1842
        if "adopt" in ddict:
1843
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
1844
                                     " (disk %d)" % didx)
1845
        try:
1846
          ddict["size"] = utils.ParseUnit(ddict["size"])
1847
        except ValueError, err:
1848
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
1849
                                     (didx, err))
1850
      elif "adopt" in ddict:
1851
        if mode == constants.INSTANCE_IMPORT:
1852
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
1853
                                     " import")
1854
        ddict["size"] = 0
1855
      else:
1856
        raise errors.OpPrereqError("Missing size or adoption source for"
1857
                                   " disk %d" % didx)
1858
      disks[didx] = ddict
1859

    
1860
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
1861
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
1862

    
1863
  if mode == constants.INSTANCE_CREATE:
1864
    start = opts.start
1865
    os_type = opts.os
1866
    force_variant = opts.force_variant
1867
    src_node = None
1868
    src_path = None
1869
    no_install = opts.no_install
1870
    identify_defaults = False
1871
  elif mode == constants.INSTANCE_IMPORT:
1872
    start = False
1873
    os_type = None
1874
    force_variant = False
1875
    src_node = opts.src_node
1876
    src_path = opts.src_dir
1877
    no_install = None
1878
    identify_defaults = opts.identify_defaults
1879
  else:
1880
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
1881

    
1882
  op = opcodes.OpCreateInstance(instance_name=instance,
1883
                                disks=disks,
1884
                                disk_template=opts.disk_template,
1885
                                nics=nics,
1886
                                pnode=pnode, snode=snode,
1887
                                ip_check=opts.ip_check,
1888
                                name_check=opts.name_check,
1889
                                wait_for_sync=opts.wait_for_sync,
1890
                                file_storage_dir=opts.file_storage_dir,
1891
                                file_driver=opts.file_driver,
1892
                                iallocator=opts.iallocator,
1893
                                hypervisor=hypervisor,
1894
                                hvparams=hvparams,
1895
                                beparams=opts.beparams,
1896
                                osparams=opts.osparams,
1897
                                mode=mode,
1898
                                start=start,
1899
                                os_type=os_type,
1900
                                force_variant=force_variant,
1901
                                src_node=src_node,
1902
                                src_path=src_path,
1903
                                no_install=no_install,
1904
                                identify_defaults=identify_defaults)
1905

    
1906
  SubmitOrSend(op, opts)
1907
  return 0
1908

    
1909

    
1910
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used to execute commands on remote nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; daemons are restarted on
    # these first, the master last
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None or the master node name means
        the command is run locally
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command exits with a non-zero
        status

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called; receives this helper instance as
        its first argument, followed by C{args}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # Log and report the failure, but still restart the daemons
          # via the finally clauses below
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2003

    
2004

    
2005
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  Gathers the required cluster information first, then delegates the
  stop/call/restart cycle to L{_RunWhileClusterStoppedHelper}.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  config_values = cl.QueryConfigValues(["cluster_name", "master_node"])
  (cluster_name, master_node) = config_values

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
2031

    
2032

    
2033
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  Note: the rows in C{data} are modified in place (values are converted
  to strings, unit fields are formatted).

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # One %-format specifier per field: plain "%s" in separator mode,
  # otherwise "%*s"/"%-*s" with a runtime-supplied width for
  # right-/left-aligned columns
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum width seen for each column
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    # Escape literal '%' in the separator before building the format
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify all values (in place) and compute column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # Not a number; leave the value as-is
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Zero width for a left-aligned last column avoids trailing spaces
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: render each data row; None rows become all-dash rows
  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2139

    
2140

    
2141
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or '?' if the input
      is not a two-element sequence

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usec) = ts
    formatted = time.strftime("%F %T", time.localtime(sec))
    return "%s.%06d" % (formatted, usec)
  # Anything else cannot be interpreted as a timestamp
  return '?'
2155

    
2156

    
2157
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @return: the number of seconds as an integer
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  text = str(value)
  if not text:
    raise errors.OpPrereqError("Empty time specification passed")
  multipliers = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  multiplier = multipliers.get(text[-1])
  if multiplier is None:
    # No recognized suffix: interpret the whole value as seconds
    try:
      return int(text)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % text)
  digits = text[:-1]
  if not digits: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
                               " suffix passed)")
  try:
    return int(digits) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % digits)
2197

    
2198

    
2199
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  Queries the given (or a newly created) luxi client for the node list
  and drops all offline nodes, optionally printing a note about the
  skipped ones on stderr.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: if True, suppress the stderr note listing the skipped
      offline nodes
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]

    def _Accept(name):
      return name != master_node
  else:
    def _Accept(_):
      return True

  rows = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                       use_locking=False)

  offline = [row[0] for row in rows if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  online = []
  for row in rows:
    if not row[1] and _Accept(row[0]):
      online.append(row[name_idx])
  return online
2243

    
2244

    
2245
def _ToStream(stream, txt, *args):
2246
  """Write a message to a stream, bypassing the logging system
2247

2248
  @type stream: file object
2249
  @param stream: the file to which we should write
2250
  @type txt: str
2251
  @param txt: the message
2252

2253
  """
2254
  if args:
2255
    args = tuple(args)
2256
    stream.write(txt % args)
2257
  else:
2258
    stream.write(txt)
2259
  stream.write('\n')
2260
  stream.flush()
2261

    
2262

    
2263
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message; used as a %-format string if C{args} is
      non-empty

  """
  _ToStream(sys.stdout, txt, *args)
2273

    
2274

    
2275
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message; used as a %-format string if C{args} is
      non-empty

  """
  _ToStream(sys.stderr, txt, *args)
2285

    
2286

    
2287
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use, or None to create one via GetClient
    @param verbose: whether to print the submitted job IDs on stdout
    @param opts: generic command line options propagated to the opcodes
    @param feedback_fn: feedback function passed to PollJob

    """
    # Jobs queued for submission: list of (name, ops) tuples
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # Submitted jobs: list of (idx, status, job_id_or_error, name) tuples
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit the jobs one by one via SubmitJob,
        otherwise in one call via SubmitManyJobs

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # Record each submission result together with its queue position
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    The chosen job is removed from self.jobs and returned.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Query the current status of all remaining jobs (element 2 of each
    # tuple is the job ID)
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      # For failed submissions, the "job ID" slot holds the error message
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      # Return (status, job_id_or_error) pairs for all submitted jobs
      return [row[1:3] for row in self.jobs]