root / lib / cli.py @ 7e49b6ce
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "CLEANUP_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "MAC_PREFIX_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "NET_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "USEUNITS_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
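
# Each _Argument subclass above carries a min/max count, so ARGS_ONE_NODE
# means "exactly one node name" while ARGS_MANY_NODES means "zero or more".
# Illustrative only: a custom definition such as [ArgNode(min=1)] would mean
# "one or more node names".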


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """optparse's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
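
# For example (illustrative values only), a hypervisor-parameter style value
# of "kernel_path=/boot/vmlinuz,no_acpi,-root_path,serial_console" would be
# parsed by _SplitKeyVal into:
#   {"kernel_path": "/boot/vmlinuz", "acpi": False,
#    "root_path": None, "serial_console": True}
# i.e. values stay strings unless one of the prefixes above applies.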


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
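
# The ident part names a parameter group (a NIC/disk index, a hypervisor
# name, ...). Illustrative parses with hypothetical values:
#   "0:ip=192.0.2.10,mode=bridged" -> ("0", {"ip": "192.0.2.10", "mode": "bridged"})
#   "no_link0"                     -> ("link0", False)
#   "-link0"                       -> ("link0", None)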


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


# completion_suggest is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES) = range(100, 106)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
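
# Sketch of how the custom types and completion hints combine when defining
# options below (EXAMPLE_OPT is a hypothetical name, not part of __all__):
#
#   EXAMPLE_OPT = cli_option("--example", dest="example", type="keyval",
#                            default={}, help="Example key=val option",
#                            completion_suggest=OPT_COMPL_ONE_NODE)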


_YESNO = ("yes", "no")
_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

_DRY_RUN_OPT = cli_option("--dry-run", default=False,
                          action="store_true",
                          help=("Do not execute the operation, just run the"
                                " check steps and verify if it could be"
                                " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    choices=_YESNO, default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         choices=_YESNO, default=None,
                         help="Set the drained flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             choices=_YESNO, default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)


GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
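
# Illustrative behaviour: a command declared with ARGS_ONE_INSTANCE accepts
# exactly one positional argument ("info" is just an example command name):
#   _CheckArguments("info", ARGS_ONE_INSTANCE, [])        -> False (error printed)
#   _CheckArguments("info", ARGS_ONE_INSTANCE, ["inst1"]) -> True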


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
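
# Illustrative splits (hypothetical node names):
#   "node1.example.com:node2.example.com" -> ("node1.example.com", "node2.example.com")
#   "node1.example.com"                   -> ("node1.example.com", None)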


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names by which an OS can be called, according to its
  variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
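
# Illustrative result (hypothetical OS and variant names):
#   CalculateOSNames("debian-image", ["squeeze", "wheezy"])
#     -> ['debian-image+squeeze', 'debian-image+wheezy']
#   CalculateOSNames("debian-image", None) -> ['debian-image']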


def UsesRPC(fn):
  def wrapper(*args, **kwargs):
    rpc.Init()
    try:
      return fn(*args, **kwargs)
    finally:
      rpc.Shutdown()
  return wrapper
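
# UsesRPC is a decorator: it brackets a CLI command function with
# rpc.Init()/rpc.Shutdown(). Sketch (MyCommand is a hypothetical function):
#
#   @UsesRPC
#   def MyCommand(opts, args):
#     ...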


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list whose elements are tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
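
# Illustrative call with custom choices (the values are arbitrary):
#   choice = AskUser("Reboot, shutdown or cancel?",
#                    [("r", "reboot", "Reboot the instance"),
#                     ("s", "shutdown", "Shut the instance down"),
#                     ("c", None, "Cancel")])
# returns "reboot", "shutdown" or None; without a tty the last entry's
# value (None here) is returned.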


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def PollJob(job_id, cl=None, feedback_fn=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  notified_queued = False
  notified_waitlock = False

  while True:
    result = cl.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                     prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)
    elif result == constants.JOB_NOTCHANGED:
      if status is not None and not callable(feedback_fn):
        if status == constants.JOB_STATUS_QUEUED and not notified_queued:
          ToStderr("Job %s is waiting in queue", job_id)
          notified_queued = True
        elif status == constants.JOB_STATUS_WAITLOCK and not notified_waitlock:
          ToStderr("Job %s is trying to acquire all necessary locks", job_id)
          notified_waitlock = True

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, _, message) = log_entry
        if callable(feedback_fn):
          feedback_fn(log_entry[1:])
        else:
          encoded = utils.SafeEncode(message)
          ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), encoded)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cl.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]
  if status == constants.JOB_STATUS_SUCCESS:
    return result
  elif status in (constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")
  else:
    has_ok = False
    for idx, (status, msg) in enumerate(zip(opstatus, result)):
      if status == constants.OP_STATUS_SUCCESS:
        has_ok = True
      elif status == constants.OP_STATUS_ERROR:
        errors.MaybeRaise(msg)
        if has_ok:
          raise errors.OpExecError("partial failure (opcode %d): %s" %
                                   (idx, msg))
        else:
          raise errors.OpExecError(str(msg))
    # default failure mode
    raise errors.OpExecError(result)
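
# A feedback_fn, when given, receives the (timestamp, log_type, message)
# part of each log entry instead of it being printed here. Minimal sketch:
#
#   def _Feedback(log_entry):
#     (timestamp, log_type, message) = log_entry
#     ToStderr("%s", message)
#
#   PollJob(job_id, cl=cl, feedback_fn=_Feedback)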


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
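
# Typical use in a command implementation (reusing the opcode seen in AddTags
# above; any opcode works the same way):
#
#   op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
#   SubmitOrSend(op, opts)   # honours --submit via opts.submit_only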


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.dry_run = options.dry_run
    op.debug_level = options.debug


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = utils.HostInfo.SysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
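
# Minimal sketch of a gnt-* style script using GenericMain; "PingCluster" is
# a hypothetical command function taking (opts, args), and each commands
# entry is (function, argument spec, extra options, usage, description):
#
#   commands = {
#     "ping": (PingCluster, ARGS_NONE, [], "", "Check master availability"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))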


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    try:
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
    nics = [{}] * nic_max
    for nidx, ndict in opts.nics:
      nidx = int(nidx)
      if not isinstance(ndict, dict):
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
        raise errors.OpPrereqError(msg)
      nics[nidx] = ndict
  elif opts.no_nics:
    # no nics
    nics = []
  else:
    # default of one nic, all auto
    nics = [{}]
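
  # For illustration (values made up): "--net 0:mac=aa:00:00:11:22:33
  # --net 2:link=br0" arrives in opts.nics as
  #   [("0", {"mac": "aa:00:00:11:22:33"}), ("2", {"link": "br0"})]
  # and the block above expands it into a dense, index-ordered list
  #   [{"mac": "aa:00:00:11:22:33"}, {}, {"link": "br0"}]
  # where unspecified NICs stay as empty (all-default) dicts.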

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if not opts.disks and not opts.sd_size:
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]
    try:
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" not in ddict:
        raise errors.OpPrereqError("Missing size for disk %d" % didx)
      try:
        ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                   (didx, err))
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    src_node = None
    src_path = None
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    src_node = opts.src_node
    src_path = opts.src_dir
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                src_node=src_node,
                                src_path=src_path)

  SubmitOrSend(op, opts)
  return 0
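
# As with NICs, the disk list above is normalized before the opcode is built;
# for illustration (sizes are made up), "--disk 0:size=10G --disk 1:size=2G"
# arrives as [("0", {"size": "10G"}), ("1", {"size": "2G"})] and ends up as
#   disks = [{"size": 10240}, {"size": 2048}]
# with the sizes converted to mebibytes by utils.ParseUnit().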


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))
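
  # For illustration (the node name is made up): on the master node itself
  #   _RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
  # runs locally through utils.RunCmd, while
  #   _RunCmd("node2.example.com", [constants.DAEMON_UTIL, "stop-all"])
  # shell-quotes the command and executes it as root over SSH.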

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception:
          logging.exception("Caught exception")
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
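
# A minimal usage sketch (the callback and its arguments are hypothetical):
# the called function receives the helper instance as its first argument, so
# it can run further commands on specific nodes while everything is down:
#
#   def _RenameCluster(helper, new_name):
#     helper.feedback_fn("Cluster is stopped, renaming to %s" % new_name)
#     # ... modify the configuration files here ...
#
#   RunWhileClusterStopped(ToStdout, _RenameCluster, "newname.example.com")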


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list
  @return: a list of formatted lines, to be printed by the caller

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format = ' '.join(format_fields)
  else:
    format = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format % tuple(args))

  return result
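
# For illustration (field names and values are made up):
#
#   GenerateTable(headers={"name": "Node", "dtotal": "DTotal"},
#                 fields=["name", "dtotal"],
#                 separator=None,
#                 data=[["node1.example.com", 102400]],
#                 numfields=["dtotal"], unitfields=["dtotal"])
#
# returns width-aligned lines roughly like "Node              DTotal" and
# "node1.example.com 100.0G" (the unit field went through utils.FormatUnit),
# while passing separator="|" instead yields "Node|DTotal" and
# "node1.example.com|102400".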


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
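
# For illustration: FormatTimestamp((1230768000, 5)) formats the timestamp in
# the local timezone, e.g. "2009-01-01 00:00:00.000005" when running under
# UTC, while anything that is not a two-element sequence is returned as "?".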


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
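
# For illustration: ParseTimespec("30") == 30, ParseTimespec("2h") == 7200 and
# ParseTimespec("1w") == 604800 (all in seconds), while "h" on its own or
# "2x" raise OpPrereqError.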


def GetOnlineNodes(nodes, cl=None, nowarn=False):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed

  """
  if cl is None:
    cl = GetClient()

  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[0] for row in result if not row[1]]
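
# For illustration (node names are made up): if the master returns
#   [["node1.example.com", False], ["node2.example.com", True]]
# the call prints "Note: skipping offline node(s): node2.example.com" to
# stderr (unless nowarn is set) and returns ["node1.example.com"].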


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()
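
# For illustration: ToStdout("Submitted job %s", 42) below expands to
# "Submitted job 42\n", whereas ToStdout("50% done") is written literally
# (no %-expansion happens when no extra arguments are passed).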


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in the feedback
        messages when waiting for the job results
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self):
    """Submit all pending jobs.

    """
    results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))
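
    # For illustration: each entry in "results" unpacks as (status, data),
    # where data is the new job ID when submission succeeded and an error
    # message otherwise, so self.jobs ends up holding tuples such as
    #   (0, True, "1234", "instance1.example.com")
    #   (1, False, "submission error", "instance2.example.com")
    # (the job ID and names here are made up).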

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if status[0] in (constants.JOB_STATUS_QUEUED,
                       constants.JOB_STATUS_WAITLOCK,
                       constants.JOB_STATUS_CANCELING):
        # job is still waiting
        continue
      # good candidate found
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = utils.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results
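
    # For illustration: with two queued jobs the return value looks like
    #   [(True, <result of first job>), (False, "error message")]
    # i.e. one (success, result-or-error) pair per job, in submission order
    # even though the jobs were polled in whatever order they finished.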

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      # self.jobs holds (index, submit status, job id or error, name) tuples
      for (_, status, result, name) in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)