#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
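
# Illustrative note (not part of the original source): these definitions act
# as the per-command argument specification that _CheckArguments validates
# further down. For example, a hypothetical command taking exactly one
# instance name would declare ARGS_ONE_INSTANCE, while one accepting any
# number of node names would declare ARGS_MANY_NODES (ArgNode() with the
# default min=0, max=None).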


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
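
# Illustrative note (not part of the original source): an option declared with
# type="unit" accepts sizes such as "512", "512m" or "4g" and normalizes them
# through utils.ParseUnit; assuming ParseUnit's usual MiB-based semantics,
# "4g" would become 4096.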


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
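
# Illustrative example (not part of the original source, key names are
# arbitrary): for an option named "-B", the call
# _SplitKeyVal("-B", "memory=512,no_auto_balance,-kernel_path") would return
# {"memory": "512", "auto_balance": False, "kernel_path": None}.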


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
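
# Illustrative example (not part of the original source): a value such as
# "0:size=1G,mode=rw" parses to ("0", {"size": "1G", "mode": "rw"}), while a
# bare "no_link0" parses to ("link0", False), i.e. "remove this parameter
# group".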


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES) = range(100, 106)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
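
# Illustrative sketch (not part of the original module; option name and dest
# are hypothetical): the custom types registered above are what allow the
# option definitions below, e.g.
#   example_opt = cli_option("--example-backend", dest="example_beparams",
#                            type="keyval", default={},
#                            help="Backend parameters (illustrative only)")
# where a command line value "memory=512,auto_balance" would be parsed through
# check_key_val into {"memory": "512", "auto_balance": True}.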


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify it it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
742
                         " disrupt briefly the replication (like during the"
743
                         " migration")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance MAC"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volumes names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
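
# Illustrative example (not part of the original source): SplitNodeOption
# splits the value of "-n"-style options, e.g. "node1.example.com:node2"
# becomes ["node1.example.com", "node2"], while a plain "node1.example.com"
# becomes ("node1.example.com", None).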


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
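
# Illustrative example (not part of the original source):
# CalculateOSNames("debootstrap", ["squeeze", "lenny"]) would return
# ["debootstrap+squeeze", "debootstrap+lenny"], while a variant-less OS
# simply yields ["debootstrap"].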


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
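
# Illustrative example (not part of the original source; field names are
# arbitrary): with default fields ["name", "status"], ParseFields(None,
# default) returns the default list, ParseFields("+oper_ram", default)
# returns ["name", "status", "oper_ram"], and ParseFields("name,os", default)
# returns ["name", "os"].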


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list whose elements are tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
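
# Illustrative example (not part of the original source): a confirmation
# prompt with custom choices could look like
#   choice = AskUser("Continue with the risky operation?",
#                    choices=[("y", True, "Proceed"),
#                             ("n", False, "Abort"),
#                             ("a", "all", "Proceed for all items")])
# Without a controlling tty, the last entry's return value is used.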


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
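
# Illustrative note (not part of the original source): GenericPollJob is
# typically wired up with the callback classes defined below, e.g. polling a
# submitted job over luxi with stdout/stderr reporting could look like
#   result = GenericPollJob(job_id, _LuxiJobPollCb(cl), StdioJobPollReportCb())
# where cl is a luxi client such as the one returned by GetClient().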


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.
1451

1452
    @type job_id: number
1453
    @param job_id: Job ID
1454
    @type status: string or None
1455
    @param status: Job status if available
1456

1457
    """
1458
    raise NotImplementedError()
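
# Illustrative sketch (not part of the original module): a minimal custom
# reporter built on JobPollReportCbBase; it only counts log messages and can
# be passed to PollJob (defined below) via its 'reporter' argument.
#
#   class _CountingReportCb(JobPollReportCbBase):
#     def __init__(self):
#       JobPollReportCbBase.__init__(self)
#       self.messages = 0
#
#     def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
#       self.messages += 1
#
#     def ReportNotChanged(self, job_id, status):
#       pass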
1459

    
1460

    
1461
class _LuxiJobPollCb(JobPollCbBase):
1462
  def __init__(self, cl):
1463
    """Initializes this class.
1464

1465
    """
1466
    JobPollCbBase.__init__(self)
1467
    self.cl = cl
1468

    
1469
  def WaitForJobChangeOnce(self, job_id, fields,
1470
                           prev_job_info, prev_log_serial):
1471
    """Waits for changes on a job.
1472

1473
    """
1474
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1475
                                        prev_job_info, prev_log_serial)
1476

    
1477
  def QueryJobs(self, job_ids, fields):
1478
    """Returns the selected fields for the selected job IDs.
1479

1480
    """
1481
    return self.cl.QueryJobs(job_ids, fields)
1482

    
1483

    
1484
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1485
  def __init__(self, feedback_fn):
1486
    """Initializes this class.
1487

1488
    """
1489
    JobPollReportCbBase.__init__(self)
1490

    
1491
    self.feedback_fn = feedback_fn
1492

    
1493
    assert callable(feedback_fn)
1494

    
1495
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1496
    """Handles a log message.
1497

1498
    """
1499
    self.feedback_fn((timestamp, log_type, log_msg))
1500

    
1501
  def ReportNotChanged(self, job_id, status):
1502
    """Called if a job hasn't changed in a while.
1503

1504
    """
1505
    # Ignore
1506

    
1507

    
1508
class StdioJobPollReportCb(JobPollReportCbBase):
1509
  def __init__(self):
1510
    """Initializes this class.
1511

1512
    """
1513
    JobPollReportCbBase.__init__(self)
1514

    
1515
    self.notified_queued = False
1516
    self.notified_waitlock = False
1517

    
1518
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1519
    """Handles a log message.
1520

1521
    """
1522
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1523
             FormatLogMessage(log_type, log_msg))
1524

    
1525
  def ReportNotChanged(self, job_id, status):
1526
    """Called if a job hasn't changed in a while.
1527

1528
    """
1529
    if status is None:
1530
      return
1531

    
1532
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1533
      ToStderr("Job %s is waiting in queue", job_id)
1534
      self.notified_queued = True
1535

    
1536
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1537
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1538
      self.notified_waitlock = True
1539

    
1540

    
1541
def FormatLogMessage(log_type, log_msg):
1542
  """Formats a job message according to its type.
1543

1544
  """
1545
  if log_type != constants.ELOG_MESSAGE:
1546
    log_msg = str(log_msg)
1547

    
1548
  return utils.SafeEncode(log_msg)
1549

    
1550

    
1551
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1552
  """Function to poll for the result of a job.
1553

1554
  @type job_id: job identifier
1555
  @param job_id: the job to poll for results
1556
  @type cl: luxi.Client
1557
  @param cl: the luxi client to use for communicating with the master;
1558
             if None, a new client will be created
1559

1560
  """
1561
  if cl is None:
1562
    cl = GetClient()
1563

    
1564
  if reporter is None:
1565
    if feedback_fn:
1566
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1567
    else:
1568
      reporter = StdioJobPollReportCb()
1569
  elif feedback_fn:
1570
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1571

    
1572
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
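
# Illustrative usage (not part of the original module): polling a previously
# submitted job and letting PollJob fall back to the stdio reporter. The
# opcode name is an assumption for the example.
#
#   cl = GetClient()
#   job_id = SendJob([opcodes.OpQueryClusterInfo()], cl=cl)
#   results = PollJob(job_id, cl=cl)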
1573

    
1574

    
1575
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1576
  """Legacy function to submit an opcode.
1577

1578
  This is just a simple wrapper over the construction of the processor
1579
  instance. It should be extended to better handle feedback and
1580
  interaction functions.
1581

1582
  """
1583
  if cl is None:
1584
    cl = GetClient()
1585

    
1586
  SetGenericOpcodeOpts([op], opts)
1587

    
1588
  job_id = SendJob([op], cl=cl)
1589

    
1590
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1591
                       reporter=reporter)
1592

    
1593
  return op_results[0]
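
# Illustrative usage (not part of the original module): submitting a single
# opcode and waiting for its result; 'opts' would be the parsed command line
# options of a gnt-* script and the opcode name is an assumption.
#
#   op = opcodes.OpQueryClusterInfo()
#   cluster_info = SubmitOpCode(op, opts=opts)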
1594

    
1595

    
1596
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1597
  """Wrapper around SubmitOpCode or SendJob.
1598

1599
  This function will decide, based on the 'opts' parameter, whether to
1600
  submit and wait for the result of the opcode (and return it), or
1601
  whether to just send the job and print its identifier. It is used in
1602
  order to simplify the implementation of the '--submit' option.
1603

1604
  It will also process the opcodes if we're sending them via SendJob
1605
  (otherwise SubmitOpCode does it).
1606

1607
  """
1608
  if opts and opts.submit_only:
1609
    job = [op]
1610
    SetGenericOpcodeOpts(job, opts)
1611
    job_id = SendJob(job, cl=cl)
1612
    raise JobSubmittedException(job_id)
1613
  else:
1614
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
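
# Illustrative sketch (not part of the original module): a typical gnt-*
# command handler using SubmitOrSend. With a '--submit'-style option set
# (opts.submit_only), the JobSubmittedException raised above is turned into a
# "JobID: ..." message and exit code 0 by FormatError/GenericMain. The opcode
# and its fields are assumptions for the example.
#
#   def StartInstance(opts, args):
#     op = opcodes.OpStartupInstance(instance_name=args[0], force=opts.force)
#     SubmitOrSend(op, opts)
#     return 0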
1615

    
1616

    
1617
def SetGenericOpcodeOpts(opcode_list, options):
1618
  """Processor for generic options.
1619

1620
  This function updates the given opcodes based on generic command
1621
  line options (like debug, dry-run, etc.).
1622

1623
  @param opcode_list: list of opcodes
1624
  @param options: command line options or None
1625
  @return: None (in-place modification)
1626

1627
  """
1628
  if not options:
1629
    return
1630
  for op in opcode_list:
1631
    if hasattr(options, "dry_run"):
1632
      op.dry_run = options.dry_run
1633
    op.debug_level = options.debug
1634

    
1635

    
1636
def GetClient():
1637
  # TODO: Cache object?
1638
  try:
1639
    client = luxi.Client()
1640
  except luxi.NoMasterError:
1641
    ss = ssconf.SimpleStore()
1642

    
1643
    # Try to read ssconf file
1644
    try:
1645
      ss.GetMasterNode()
1646
    except errors.ConfigurationError:
1647
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1648
                                 " not part of a cluster")
1649

    
1650
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1651
    if master != myself:
1652
      raise errors.OpPrereqError("This is not the master node, please connect"
1653
                                 " to node '%s' and rerun the command" %
1654
                                 master)
1655
    raise
1656
  return client
1657

    
1658

    
1659
def FormatError(err):
1660
  """Return a formatted error message for a given error.
1661

1662
  This function takes an exception instance and returns a tuple
1663
  consisting of two values: first, the recommended exit code, and
1664
  second, a string describing the error message (not
1665
  newline-terminated).
1666

1667
  """
1668
  retcode = 1
1669
  obuf = StringIO()
1670
  msg = str(err)
1671
  if isinstance(err, errors.ConfigurationError):
1672
    txt = "Corrupt configuration file: %s" % msg
1673
    logging.error(txt)
1674
    obuf.write(txt + "\n")
1675
    obuf.write("Aborting.")
1676
    retcode = 2
1677
  elif isinstance(err, errors.HooksAbort):
1678
    obuf.write("Failure: hooks execution failed:\n")
1679
    for node, script, out in err.args[0]:
1680
      if out:
1681
        obuf.write("  node: %s, script: %s, output: %s\n" %
1682
                   (node, script, out))
1683
      else:
1684
        obuf.write("  node: %s, script: %s (no output)\n" %
1685
                   (node, script))
1686
  elif isinstance(err, errors.HooksFailure):
1687
    obuf.write("Failure: hooks general failure: %s" % msg)
1688
  elif isinstance(err, errors.ResolverError):
1689
    this_host = netutils.Hostname.GetSysName()
1690
    if err.args[0] == this_host:
1691
      msg = "Failure: can't resolve my own hostname ('%s')"
1692
    else:
1693
      msg = "Failure: can't resolve hostname '%s'"
1694
    obuf.write(msg % err.args[0])
1695
  elif isinstance(err, errors.OpPrereqError):
1696
    if len(err.args) == 2:
1697
      obuf.write("Failure: prerequisites not met for this"
1698
               " operation:\nerror type: %s, error details:\n%s" %
1699
                 (err.args[1], err.args[0]))
1700
    else:
1701
      obuf.write("Failure: prerequisites not met for this"
1702
                 " operation:\n%s" % msg)
1703
  elif isinstance(err, errors.OpExecError):
1704
    obuf.write("Failure: command execution error:\n%s" % msg)
1705
  elif isinstance(err, errors.TagError):
1706
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1707
  elif isinstance(err, errors.JobQueueDrainError):
1708
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1709
               " accept new requests\n")
1710
  elif isinstance(err, errors.JobQueueFull):
1711
    obuf.write("Failure: the job queue is full and doesn't accept new"
1712
               " job submissions until old jobs are archived\n")
1713
  elif isinstance(err, errors.TypeEnforcementError):
1714
    obuf.write("Parameter Error: %s" % msg)
1715
  elif isinstance(err, errors.ParameterError):
1716
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1717
  elif isinstance(err, luxi.NoMasterError):
1718
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1719
               " and listening for connections?")
1720
  elif isinstance(err, luxi.TimeoutError):
1721
    obuf.write("Timeout while talking to the master daemon. Error:\n"
1722
               "%s" % msg)
1723
  elif isinstance(err, luxi.PermissionError):
1724
    obuf.write("It seems you don't have permissions to connect to the"
1725
               " master daemon.\nPlease retry as a different user.")
1726
  elif isinstance(err, luxi.ProtocolError):
1727
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1728
               "%s" % msg)
1729
  elif isinstance(err, errors.JobLost):
1730
    obuf.write("Error checking job status: %s" % msg)
1731
  elif isinstance(err, errors.GenericError):
1732
    obuf.write("Unhandled Ganeti error: %s" % msg)
1733
  elif isinstance(err, JobSubmittedException):
1734
    obuf.write("JobID: %s\n" % err.args[0])
1735
    retcode = 0
1736
  else:
1737
    obuf.write("Unhandled exception: %s" % msg)
1738
  return retcode, obuf.getvalue().rstrip('\n')
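
# Illustrative usage (not part of the original module): converting a caught
# Ganeti error into an exit code and a printable message.
#
#   try:
#     result = SubmitOpCode(op, opts=opts)
#   except errors.OpPrereqError, err:
#     retcode, msg = FormatError(err)
#     ToStderr(msg)
#     sys.exit(retcode)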
1739

    
1740

    
1741
def GenericMain(commands, override=None, aliases=None):
1742
  """Generic main function for all the gnt-* commands.
1743

1744
  Arguments:
1745
    - commands: a dictionary with a special structure, see the design doc
1746
                for command line handling.
1747
    - override: if not None, we expect a dictionary with keys that will
1748
                override command line options; this can be used to pass
1749
                options from the scripts to generic functions
1750
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1751

1752
  """
1753
  # save the program name and the entire command line for later logging
1754
  if sys.argv:
1755
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1756
    if len(sys.argv) >= 2:
1757
      binary += " " + sys.argv[1]
1758
      old_cmdline = " ".join(sys.argv[2:])
1759
    else:
1760
      old_cmdline = ""
1761
  else:
1762
    binary = "<unknown program>"
1763
    old_cmdline = ""
1764

    
1765
  if aliases is None:
1766
    aliases = {}
1767

    
1768
  try:
1769
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1770
  except errors.ParameterError, err:
1771
    result, err_msg = FormatError(err)
1772
    ToStderr(err_msg)
1773
    return 1
1774

    
1775
  if func is None: # parse error
1776
    return 1
1777

    
1778
  if override is not None:
1779
    for key, val in override.iteritems():
1780
      setattr(options, key, val)
1781

    
1782
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1783
                     stderr_logging=True, program=binary)
1784

    
1785
  if old_cmdline:
1786
    logging.info("run with arguments '%s'", old_cmdline)
1787
  else:
1788
    logging.info("run with no arguments")
1789

    
1790
  try:
1791
    result = func(options, args)
1792
  except (errors.GenericError, luxi.ProtocolError,
1793
          JobSubmittedException), err:
1794
    result, err_msg = FormatError(err)
1795
    logging.exception("Error during command processing")
1796
    ToStderr(err_msg)
1797

    
1798
  return result
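
# Illustrative sketch (not part of the original module): the skeleton of a
# small gnt-* style script built on GenericMain. The five-element tuple layout
# (function, argument definitions, options, usage, description) is assumed
# from the gnt-* scripts of this era; ShowVersion is a hypothetical handler.
#
#   def ShowVersion(opts, args):
#     ToStdout("version 0.0")
#     return 0
#
#   commands = {
#     "version": (ShowVersion, ARGS_NONE, [DEBUG_OPT], "", "Show the version"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))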
1799

    
1800

    
1801
def GenericInstanceCreate(mode, opts, args):
1802
  """Add an instance to the cluster via either creation or import.
1803

1804
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1805
  @param opts: the command line options selected by the user
1806
  @type args: list
1807
  @param args: should contain only one element, the new instance name
1808
  @rtype: int
1809
  @return: the desired exit code
1810

1811
  """
1812
  instance = args[0]
1813

    
1814
  (pnode, snode) = SplitNodeOption(opts.node)
1815

    
1816
  hypervisor = None
1817
  hvparams = {}
1818
  if opts.hypervisor:
1819
    hypervisor, hvparams = opts.hypervisor
1820

    
1821
  if opts.nics:
1822
    try:
1823
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
1824
    except ValueError, err:
1825
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1826
    nics = [{}] * nic_max
1827
    for nidx, ndict in opts.nics:
1828
      nidx = int(nidx)
1829
      if not isinstance(ndict, dict):
1830
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
1831
        raise errors.OpPrereqError(msg)
1832
      nics[nidx] = ndict
1833
  elif opts.no_nics:
1834
    # no nics
1835
    nics = []
1836
  elif mode == constants.INSTANCE_CREATE:
1837
    # default of one nic, all auto
1838
    nics = [{}]
1839
  else:
1840
    # mode == import
1841
    nics = []
1842

    
1843
  if opts.disk_template == constants.DT_DISKLESS:
1844
    if opts.disks or opts.sd_size is not None:
1845
      raise errors.OpPrereqError("Diskless instance but disk"
1846
                                 " information passed")
1847
    disks = []
1848
  else:
1849
    if (not opts.disks and not opts.sd_size
1850
        and mode == constants.INSTANCE_CREATE):
1851
      raise errors.OpPrereqError("No disk information specified")
1852
    if opts.disks and opts.sd_size is not None:
1853
      raise errors.OpPrereqError("Please use either the '--disk' or"
1854
                                 " '-s' option")
1855
    if opts.sd_size is not None:
1856
      opts.disks = [(0, {"size": opts.sd_size})]
1857

    
1858
    if opts.disks:
1859
      try:
1860
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1861
      except ValueError, err:
1862
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
1863
      disks = [{}] * disk_max
1864
    else:
1865
      disks = []
1866
    for didx, ddict in opts.disks:
1867
      didx = int(didx)
1868
      if not isinstance(ddict, dict):
1869
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1870
        raise errors.OpPrereqError(msg)
1871
      elif "size" in ddict:
1872
        if "adopt" in ddict:
1873
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
1874
                                     " (disk %d)" % didx)
1875
        try:
1876
          ddict["size"] = utils.ParseUnit(ddict["size"])
1877
        except ValueError, err:
1878
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
1879
                                     (didx, err))
1880
      elif "adopt" in ddict:
1881
        if mode == constants.INSTANCE_IMPORT:
1882
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
1883
                                     " import")
1884
        ddict["size"] = 0
1885
      else:
1886
        raise errors.OpPrereqError("Missing size or adoption source for"
1887
                                   " disk %d" % didx)
1888
      disks[didx] = ddict
1889

    
1890
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
1891
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
1892

    
1893
  if mode == constants.INSTANCE_CREATE:
1894
    start = opts.start
1895
    os_type = opts.os
1896
    force_variant = opts.force_variant
1897
    src_node = None
1898
    src_path = None
1899
    no_install = opts.no_install
1900
    identify_defaults = False
1901
  elif mode == constants.INSTANCE_IMPORT:
1902
    start = False
1903
    os_type = None
1904
    force_variant = False
1905
    src_node = opts.src_node
1906
    src_path = opts.src_dir
1907
    no_install = None
1908
    identify_defaults = opts.identify_defaults
1909
  else:
1910
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
1911

    
1912
  op = opcodes.OpCreateInstance(instance_name=instance,
1913
                                disks=disks,
1914
                                disk_template=opts.disk_template,
1915
                                nics=nics,
1916
                                pnode=pnode, snode=snode,
1917
                                ip_check=opts.ip_check,
1918
                                name_check=opts.name_check,
1919
                                wait_for_sync=opts.wait_for_sync,
1920
                                file_storage_dir=opts.file_storage_dir,
1921
                                file_driver=opts.file_driver,
1922
                                iallocator=opts.iallocator,
1923
                                hypervisor=hypervisor,
1924
                                hvparams=hvparams,
1925
                                beparams=opts.beparams,
1926
                                osparams=opts.osparams,
1927
                                mode=mode,
1928
                                start=start,
1929
                                os_type=os_type,
1930
                                force_variant=force_variant,
1931
                                src_node=src_node,
1932
                                src_path=src_path,
1933
                                no_install=no_install,
1934
                                identify_defaults=identify_defaults)
1935

    
1936
  SubmitOrSend(op, opts)
1937
  return 0
1938

    
1939

    
1940
class _RunWhileClusterStoppedHelper:
1941
  """Helper class for L{RunWhileClusterStopped} to simplify state management
1942

1943
  """
1944
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
1945
    """Initializes this class.
1946

1947
    @type feedback_fn: callable
1948
    @param feedback_fn: Feedback function
1949
    @type cluster_name: string
1950
    @param cluster_name: Cluster name
1951
    @type master_node: string
1952
    @param master_node: Master node name
1953
    @type online_nodes: list
1954
    @param online_nodes: List of names of online nodes
1955

1956
    """
1957
    self.feedback_fn = feedback_fn
1958
    self.cluster_name = cluster_name
1959
    self.master_node = master_node
1960
    self.online_nodes = online_nodes
1961

    
1962
    self.ssh = ssh.SshRunner(self.cluster_name)
1963

    
1964
    self.nonmaster_nodes = [name for name in online_nodes
1965
                            if name != master_node]
1966

    
1967
    assert self.master_node not in self.nonmaster_nodes
1968

    
1969
  def _RunCmd(self, node_name, cmd):
1970
    """Runs a command on the local or a remote machine.
1971

1972
    @type node_name: string
1973
    @param node_name: Machine name
1974
    @type cmd: list
1975
    @param cmd: Command
1976

1977
    """
1978
    if node_name is None or node_name == self.master_node:
1979
      # No need to use SSH
1980
      result = utils.RunCmd(cmd)
1981
    else:
1982
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
1983

    
1984
    if result.failed:
1985
      errmsg = ["Failed to run command %s" % result.cmd]
1986
      if node_name:
1987
        errmsg.append("on node %s" % node_name)
1988
      errmsg.append(": exitcode %s and error %s" %
1989
                    (result.exit_code, result.output))
1990
      raise errors.OpExecError(" ".join(errmsg))
1991

    
1992
  def Call(self, fn, *args):
1993
    """Call function while all daemons are stopped.
1994

1995
    @type fn: callable
1996
    @param fn: Function to be called
1997

1998
    """
1999
    # Pause watcher by acquiring an exclusive lock on watcher state file
2000
    self.feedback_fn("Blocking watcher")
2001
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2002
    try:
2003
      # TODO: Currently, this just blocks. There's no timeout.
2004
      # TODO: Should it be a shared lock?
2005
      watcher_block.Exclusive(blocking=True)
2006

    
2007
      # Stop master daemons, so that no new jobs can come in and all running
2008
      # ones are finished
2009
      self.feedback_fn("Stopping master daemons")
2010
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2011
      try:
2012
        # Stop daemons on all nodes
2013
        for node_name in self.online_nodes:
2014
          self.feedback_fn("Stopping daemons on %s" % node_name)
2015
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2016

    
2017
        # All daemons are shut down now
2018
        try:
2019
          return fn(self, *args)
2020
        except Exception, err:
2021
          _, errmsg = FormatError(err)
2022
          logging.exception("Caught exception")
2023
          self.feedback_fn(errmsg)
2024
          raise
2025
      finally:
2026
        # Start cluster again, master node last
2027
        for node_name in self.nonmaster_nodes + [self.master_node]:
2028
          self.feedback_fn("Starting daemons on %s" % node_name)
2029
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2030
    finally:
2031
      # Resume watcher
2032
      watcher_block.Close()
2033

    
2034

    
2035
def RunWhileClusterStopped(feedback_fn, fn, *args):
2036
  """Calls a function while all cluster daemons are stopped.
2037

2038
  @type feedback_fn: callable
2039
  @param feedback_fn: Feedback function
2040
  @type fn: callable
2041
  @param fn: Function to be called when daemons are stopped
2042

2043
  """
2044
  feedback_fn("Gathering cluster information")
2045

    
2046
  # This ensures we're running on the master daemon
2047
  cl = GetClient()
2048

    
2049
  (cluster_name, master_node) = \
2050
    cl.QueryConfigValues(["cluster_name", "master_node"])
2051

    
2052
  online_nodes = GetOnlineNodes([], cl=cl)
2053

    
2054
  # Don't keep a reference to the client. The master daemon will go away.
2055
  del cl
2056

    
2057
  assert master_node in online_nodes
2058

    
2059
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2060
                                       online_nodes).Call(fn, *args)
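
# Illustrative usage (not part of the original module): the called function
# receives the _RunWhileClusterStoppedHelper instance as its first argument,
# so it can report progress or run commands on nodes while everything is down.
#
#   def _WhileStopped(ctx):
#     ctx.feedback_fn("All daemons stopped on: " + ", ".join(ctx.online_nodes))
#
#   RunWhileClusterStopped(ToStdout, _WhileStopped)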
2061

    
2062

    
2063
def GenerateTable(headers, fields, separator, data,
2064
                  numfields=None, unitfields=None,
2065
                  units=None):
2066
  """Prints a table with headers and different fields.
2067

2068
  @type headers: dict
2069
  @param headers: dictionary mapping field names to headers for
2070
      the table
2071
  @type fields: list
2072
  @param fields: the field names corresponding to each row in
2073
      the data field
2074
  @param separator: the separator to be used; if this is None,
2075
      the default 'smart' algorithm is used which computes optimal
2076
      field width, otherwise just the separator is used between
2077
      each field
2078
  @type data: list
2079
  @param data: a list of lists, each sublist being one row to be output
2080
  @type numfields: list
2081
  @param numfields: a list with the fields that hold numeric
2082
      values and thus should be right-aligned
2083
  @type unitfields: list
2084
  @param unitfields: a list with the fields that hold numeric
2085
      values that should be formatted with the units field
2086
  @type units: string or None
2087
  @param units: the units we should use for formatting, or None for
2088
      automatic choice (human-readable for non-separator usage, otherwise
2089
      megabytes); this is a one-letter string
2090

2091
  """
2092
  if units is None:
2093
    if separator:
2094
      units = "m"
2095
    else:
2096
      units = "h"
2097

    
2098
  if numfields is None:
2099
    numfields = []
2100
  if unitfields is None:
2101
    unitfields = []
2102

    
2103
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2104
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2105

    
2106
  format_fields = []
2107
  for field in fields:
2108
    if headers and field not in headers:
2109
      # TODO: handle better unknown fields (either revert to old
2110
      # style of raising exception, or deal more intelligently with
2111
      # variable fields)
2112
      headers[field] = field
2113
    if separator is not None:
2114
      format_fields.append("%s")
2115
    elif numfields.Matches(field):
2116
      format_fields.append("%*s")
2117
    else:
2118
      format_fields.append("%-*s")
2119

    
2120
  if separator is None:
2121
    mlens = [0 for name in fields]
2122
    format_str = ' '.join(format_fields)
2123
  else:
2124
    format_str = separator.replace("%", "%%").join(format_fields)
2125

    
2126
  for row in data:
2127
    if row is None:
2128
      continue
2129
    for idx, val in enumerate(row):
2130
      if unitfields.Matches(fields[idx]):
2131
        try:
2132
          val = int(val)
2133
        except (TypeError, ValueError):
2134
          pass
2135
        else:
2136
          val = row[idx] = utils.FormatUnit(val, units)
2137
      val = row[idx] = str(val)
2138
      if separator is None:
2139
        mlens[idx] = max(mlens[idx], len(val))
2140

    
2141
  result = []
2142
  if headers:
2143
    args = []
2144
    for idx, name in enumerate(fields):
2145
      hdr = headers[name]
2146
      if separator is None:
2147
        mlens[idx] = max(mlens[idx], len(hdr))
2148
        args.append(mlens[idx])
2149
      args.append(hdr)
2150
    result.append(format_str % tuple(args))
2151

    
2152
  if separator is None:
2153
    assert len(mlens) == len(fields)
2154

    
2155
    if fields and not numfields.Matches(fields[-1]):
2156
      mlens[-1] = 0
2157

    
2158
  for line in data:
2159
    args = []
2160
    if line is None:
2161
      line = ['-' for _ in fields]
2162
    for idx in range(len(fields)):
2163
      if separator is None:
2164
        args.append(mlens[idx])
2165
      args.append(line[idx])
2166
    result.append(format_str % tuple(args))
2167

    
2168
  return result
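
# Illustrative usage (not part of the original module): rendering a small
# two-column table with human-readable units for the memory column; the field
# names and data are made up for the example.
#
#   headers = {"name": "Node", "mem": "Memory"}
#   fields = ["name", "mem"]
#   data = [["node1.example.com", 2048], ["node2.example.com", 4096]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["mem"], unitfields=["mem"]):
#     ToStdout(line)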
2169

    
2170

    
2171
def FormatTimestamp(ts):
2172
  """Formats a given timestamp.
2173

2174
  @type ts: timestamp
2175
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2176

2177
  @rtype: string
2178
  @return: a string with the formatted timestamp
2179

2180
  """
2181
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2182
    return '?'
2183
  sec, usec = ts
2184
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
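
# Illustrative usage (not part of the original module):
#
#   FormatTimestamp((1234567890, 42))  # e.g. "2009-02-13 23:31:30.000042",
#                                      # depending on the local timezone
#   FormatTimestamp("bogus")           # -> "?"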
2185

    
2186

    
2187
def ParseTimespec(value):
2188
  """Parse a time specification.
2189

2190
  The following suffixes will be recognized:
2191

2192
    - s: seconds
2193
    - m: minutes
2194
    - h: hours
2195
    - d: days
2196
    - w: weeks
2197

2198
  Without any suffix, the value will be taken to be in seconds.
2199

2200
  """
2201
  value = str(value)
2202
  if not value:
2203
    raise errors.OpPrereqError("Empty time specification passed")
2204
  suffix_map = {
2205
    's': 1,
2206
    'm': 60,
2207
    'h': 3600,
2208
    'd': 86400,
2209
    'w': 604800,
2210
    }
2211
  if value[-1] not in suffix_map:
2212
    try:
2213
      value = int(value)
2214
    except (TypeError, ValueError):
2215
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2216
  else:
2217
    multiplier = suffix_map[value[-1]]
2218
    value = value[:-1]
2219
    if not value: # no data left after stripping the suffix
2220
      raise errors.OpPrereqError("Invalid time specification (only"
2221
                                 " suffix passed)")
2222
    try:
2223
      value = int(value) * multiplier
2224
    except (TypeError, ValueError):
2225
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2226
  return value
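
# Illustrative usage (not part of the original module):
#
#   ParseTimespec("30")   # -> 30 (plain number, taken as seconds)
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("")     # raises errors.OpPrereqError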
2227

    
2228

    
2229
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2230
                   filter_master=False):
2231
  """Returns the names of online nodes.
2232

2233
  This function will also log a warning on stderr with the names of
2234
  the offline nodes that were skipped.
2235

2236
  @param nodes: if not empty, use only this subset of nodes (minus the
2237
      offline ones)
2238
  @param cl: if not None, luxi client to use
2239
  @type nowarn: boolean
2240
  @param nowarn: by default, this function will output a note with the
2241
      offline nodes that are skipped; if this parameter is True the
2242
      note is not displayed
2243
  @type secondary_ips: boolean
2244
  @param secondary_ips: if True, return the secondary IPs instead of the
2245
      names, useful for doing network traffic over the replication interface
2246
      (if any)
2247
  @type filter_master: boolean
2248
  @param filter_master: if True, do not return the master node in the list
2249
      (useful in coordination with secondary_ips where we cannot check our
2250
      node name against the list)
2251

2252
  """
2253
  if cl is None:
2254
    cl = GetClient()
2255

    
2256
  if secondary_ips:
2257
    name_idx = 2
2258
  else:
2259
    name_idx = 0
2260

    
2261
  if filter_master:
2262
    master_node = cl.QueryConfigValues(["master_node"])[0]
2263
    filter_fn = lambda x: x != master_node
2264
  else:
2265
    filter_fn = lambda _: True
2266

    
2267
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2268
                         use_locking=False)
2269
  offline = [row[0] for row in result if row[1]]
2270
  if offline and not nowarn:
2271
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2272
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
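
# Illustrative usage (not part of the original module):
#
#   cl = GetClient()
#   names = GetOnlineNodes([], cl=cl)
#   # Secondary IPs of all online nodes except the master, e.g. for traffic
#   # over the replication network:
#   sips = GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)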
2273

    
2274

    
2275
def _ToStream(stream, txt, *args):
2276
  """Write a message to a stream, bypassing the logging system
2277

2278
  @type stream: file object
2279
  @param stream: the file to which we should write
2280
  @type txt: str
2281
  @param txt: the message
2282

2283
  """
2284
  if args:
2285
    args = tuple(args)
2286
    stream.write(txt % args)
2287
  else:
2288
    stream.write(txt)
2289
  stream.write('\n')
2290
  stream.flush()
2291

    
2292

    
2293
def ToStdout(txt, *args):
2294
  """Write a message to stdout only, bypassing the logging system
2295

2296
  This is just a wrapper over _ToStream.
2297

2298
  @type txt: str
2299
  @param txt: the message
2300

2301
  """
2302
  _ToStream(sys.stdout, txt, *args)
2303

    
2304

    
2305
def ToStderr(txt, *args):
2306
  """Write a message to stderr only, bypassing the logging system
2307

2308
  This is just a wrapper over _ToStream.
2309

2310
  @type txt: str
2311
  @param txt: the message
2312

2313
  """
2314
  _ToStream(sys.stderr, txt, *args)
2315

    
2316

    
2317
class JobExecutor(object):
2318
  """Class which manages the submission and execution of multiple jobs.
2319

2320
  Note that instances of this class should not be reused between
2321
  GetResults() calls.
2322

2323
  """
2324
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2325
    self.queue = []
2326
    if cl is None:
2327
      cl = GetClient()
2328
    self.cl = cl
2329
    self.verbose = verbose
2330
    self.jobs = []
2331
    self.opts = opts
2332
    self.feedback_fn = feedback_fn
2333

    
2334
  def QueueJob(self, name, *ops):
2335
    """Record a job for later submit.
2336

2337
    @type name: string
2338
    @param name: a description of the job, will be used when waiting for and
        reporting the job's results
2339
    """
2340
    SetGenericOpcodeOpts(ops, self.opts)
2341
    self.queue.append((name, ops))
2342

    
2343
  def SubmitPending(self, each=False):
2344
    """Submit all pending jobs.
2345

2346
    """
2347
    if each:
2348
      results = []
2349
      for row in self.queue:
2350
        # SubmitJob returns only the job ID (no success flag), but it raises
2351
        # an exception if the submission fails, so we'll notice that anyway.
2352
        results.append([True, self.cl.SubmitJob(row[1])])
2353
    else:
2354
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2355
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2356
                                                            self.queue)):
2357
      self.jobs.append((idx, status, data, name))
2358

    
2359
  def _ChooseJob(self):
2360
    """Choose a non-waiting/queued job to poll next.
2361

2362
    """
2363
    assert self.jobs, "_ChooseJob called with empty job list"
2364

    
2365
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2366
    assert result
2367

    
2368
    for job_data, status in zip(self.jobs, result):
2369
      if (isinstance(status, list) and status and
2370
          status[0] in (constants.JOB_STATUS_QUEUED,
2371
                        constants.JOB_STATUS_WAITLOCK,
2372
                        constants.JOB_STATUS_CANCELING)):
2373
        # job is still present and waiting
2374
        continue
2375
      # good candidate found (either running job or lost job)
2376
      self.jobs.remove(job_data)
2377
      return job_data
2378

    
2379
    # no job found
2380
    return self.jobs.pop(0)
2381

    
2382
  def GetResults(self):
2383
    """Wait for and return the results of all jobs.
2384

2385
    @rtype: list
2386
    @return: list of tuples (success, job results), in the same order
2387
        as the submitted jobs; if a job has failed, instead of the result
2388
        there will be the error message
2389

2390
    """
2391
    if not self.jobs:
2392
      self.SubmitPending()
2393
    results = []
2394
    if self.verbose:
2395
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2396
      if ok_jobs:
2397
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2398

    
2399
    # first, remove any non-submitted jobs
2400
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2401
    for idx, _, jid, name in failures:
2402
      ToStderr("Failed to submit job for %s: %s", name, jid)
2403
      results.append((idx, False, jid))
2404

    
2405
    while self.jobs:
2406
      (idx, _, jid, name) = self._ChooseJob()
2407
      ToStdout("Waiting for job %s for %s...", jid, name)
2408
      try:
2409
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2410
        success = True
2411
      except errors.JobLost, err:
2412
        _, job_result = FormatError(err)
2413
        ToStderr("Job %s for %s has been archived, cannot check its result",
2414
                 jid, name)
2415
        success = False
2416
      except (errors.GenericError, luxi.ProtocolError), err:
2417
        _, job_result = FormatError(err)
2418
        success = False
2419
        # the error message will always be shown, verbose or not
2420
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2421

    
2422
      results.append((idx, success, job_result))
2423

    
2424
    # sort based on the index, then drop it
2425
    results.sort()
2426
    results = [i[1:] for i in results]
2427

    
2428
    return results
2429

    
2430
  def WaitOrShow(self, wait):
2431
    """Wait for job results or only print the job IDs.
2432

2433
    @type wait: boolean
2434
    @param wait: whether to wait or not
2435

2436
    """
2437
    if wait:
2438
      return self.GetResults()
2439
    else:
2440
      if not self.jobs:
2441
        self.SubmitPending()
2442
      for _, status, result, name in self.jobs:
2443
        if status:
2444
          ToStdout("%s: %s", result, name)
2445
        else:
2446
          ToStderr("Failure for %s: %s", name, result)
2447
      return [row[1:3] for row in self.jobs]
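
# Illustrative usage (not part of the original module): submitting one job per
# instance and waiting for all of them; the opcode and instance names are
# assumptions for the example.
#
#   jex = JobExecutor(opts=opts)
#   for name in ["inst1.example.com", "inst2.example.com"]:
#     jex.QueueJob(name, opcodes.OpStartupInstance(instance_name=name,
#                                                  force=False))
#   for success, result in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)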