1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41

    
42
from optparse import (OptionParser, TitledHelpFormatter,
43
                      Option, OptionValueError)
44

    
45

    
46
__all__ = [
47
  # Command line options
48
  "ADD_UIDS_OPT",
49
  "ALLOCATABLE_OPT",
50
  "ALL_OPT",
51
  "AUTO_PROMOTE_OPT",
52
  "AUTO_REPLACE_OPT",
53
  "BACKEND_OPT",
54
  "CLEANUP_OPT",
55
  "CONFIRM_OPT",
56
  "CP_SIZE_OPT",
57
  "DEBUG_OPT",
58
  "DEBUG_SIMERR_OPT",
59
  "DISKIDX_OPT",
60
  "DISK_OPT",
61
  "DISK_TEMPLATE_OPT",
62
  "DRAINED_OPT",
63
  "EARLY_RELEASE_OPT",
64
  "ENABLED_HV_OPT",
65
  "ERROR_CODES_OPT",
66
  "FIELDS_OPT",
67
  "FILESTORE_DIR_OPT",
68
  "FILESTORE_DRIVER_OPT",
69
  "FORCE_OPT",
70
  "FORCE_VARIANT_OPT",
71
  "GLOBAL_FILEDIR_OPT",
72
  "HVLIST_OPT",
73
  "HVOPTS_OPT",
74
  "HYPERVISOR_OPT",
75
  "IALLOCATOR_OPT",
76
  "IDENTIFY_DEFAULTS_OPT",
77
  "IGNORE_CONSIST_OPT",
78
  "IGNORE_FAILURES_OPT",
79
  "IGNORE_SECONDARIES_OPT",
80
  "IGNORE_SIZE_OPT",
81
  "MAC_PREFIX_OPT",
82
  "MAINTAIN_NODE_HEALTH_OPT",
83
  "MASTER_NETDEV_OPT",
84
  "MC_OPT",
85
  "NET_OPT",
86
  "NEW_CLUSTER_CERT_OPT",
87
  "NEW_CONFD_HMAC_KEY_OPT",
88
  "NEW_RAPI_CERT_OPT",
89
  "NEW_SECONDARY_OPT",
90
  "NIC_PARAMS_OPT",
91
  "NODE_LIST_OPT",
92
  "NODE_PLACEMENT_OPT",
93
  "NOHDR_OPT",
94
  "NOIPCHECK_OPT",
95
  "NO_INSTALL_OPT",
96
  "NONAMECHECK_OPT",
97
  "NOLVM_STORAGE_OPT",
98
  "NOMODIFY_ETCHOSTS_OPT",
99
  "NOMODIFY_SSH_SETUP_OPT",
100
  "NONICS_OPT",
101
  "NONLIVE_OPT",
102
  "NONPLUS1_OPT",
103
  "NOSHUTDOWN_OPT",
104
  "NOSTART_OPT",
105
  "NOSSH_KEYCHECK_OPT",
106
  "NOVOTING_OPT",
107
  "NWSYNC_OPT",
108
  "ON_PRIMARY_OPT",
109
  "ON_SECONDARY_OPT",
110
  "OFFLINE_OPT",
111
  "OS_OPT",
112
  "OS_SIZE_OPT",
113
  "RAPI_CERT_OPT",
114
  "READD_OPT",
115
  "REBOOT_TYPE_OPT",
116
  "REMOVE_UIDS_OPT",
117
  "ROMAN_OPT",
118
  "SECONDARY_IP_OPT",
119
  "SELECT_OS_OPT",
120
  "SEP_OPT",
121
  "SHOWCMD_OPT",
122
  "SHUTDOWN_TIMEOUT_OPT",
123
  "SINGLE_NODE_OPT",
124
  "SRC_DIR_OPT",
125
  "SRC_NODE_OPT",
126
  "SUBMIT_OPT",
127
  "STATIC_OPT",
128
  "SYNC_OPT",
129
  "TAG_SRC_OPT",
130
  "TIMEOUT_OPT",
131
  "UIDPOOL_OPT",
132
  "USEUNITS_OPT",
133
  "USE_REPL_NET_OPT",
134
  "VERBOSE_OPT",
135
  "VG_NAME_OPT",
136
  "YES_DOIT_OPT",
137
  # Generic functions for CLI programs
138
  "GenericMain",
139
  "GenericInstanceCreate",
140
  "GetClient",
141
  "GetOnlineNodes",
142
  "JobExecutor",
143
  "JobSubmittedException",
144
  "ParseTimespec",
145
  "RunWhileClusterStopped",
146
  "SubmitOpCode",
147
  "SubmitOrSend",
148
  "UsesRPC",
149
  # Formatting functions
150
  "ToStderr", "ToStdout",
151
  "FormatError",
152
  "GenerateTable",
153
  "AskUser",
154
  "FormatTimestamp",
155
  # Tags functions
156
  "ListTags",
157
  "AddTags",
158
  "RemoveTags",
159
  # command line options support infrastructure
160
  "ARGS_MANY_INSTANCES",
161
  "ARGS_MANY_NODES",
162
  "ARGS_NONE",
163
  "ARGS_ONE_INSTANCE",
164
  "ARGS_ONE_NODE",
165
  "ARGS_ONE_OS",
166
  "ArgChoice",
167
  "ArgCommand",
168
  "ArgFile",
169
  "ArgHost",
170
  "ArgInstance",
171
  "ArgJobId",
172
  "ArgNode",
173
  "ArgOs",
174
  "ArgSuggest",
175
  "ArgUnknown",
176
  "OPT_COMPL_INST_ADD_NODES",
177
  "OPT_COMPL_MANY_NODES",
178
  "OPT_COMPL_ONE_IALLOCATOR",
179
  "OPT_COMPL_ONE_INSTANCE",
180
  "OPT_COMPL_ONE_NODE",
181
  "OPT_COMPL_ONE_OS",
182
  "cli_option",
183
  "SplitNodeOption",
184
  "CalculateOSNames",
185
  ]
186

    
187
NO_PREFIX = "no_"
188
UN_PREFIX = "-"
189

    
190

    
191
class _Argument:
192
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
193
    self.min = min
194
    self.max = max
195

    
196
  def __repr__(self):
197
    return ("<%s min=%s max=%s>" %
198
            (self.__class__.__name__, self.min, self.max))
199

    
200

    
201
class ArgSuggest(_Argument):
202
  """Suggesting argument.
203

204
  Value can be any of the ones passed to the constructor.
205

206
  """
207
  # pylint: disable-msg=W0622
208
  def __init__(self, min=0, max=None, choices=None):
209
    _Argument.__init__(self, min=min, max=max)
210
    self.choices = choices
211

    
212
  def __repr__(self):
213
    return ("<%s min=%s max=%s choices=%r>" %
214
            (self.__class__.__name__, self.min, self.max, self.choices))
215

    
216

    
217
class ArgChoice(ArgSuggest):
218
  """Choice argument.
219

220
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
221
  but value must be one of the choices.
222

223
  """
224

    
225

    
226
class ArgUnknown(_Argument):
227
  """Unknown argument to program (e.g. determined at runtime).
228

229
  """
230

    
231

    
232
class ArgInstance(_Argument):
233
  """Instances argument.
234

235
  """
236

    
237

    
238
class ArgNode(_Argument):
239
  """Node argument.
240

241
  """
242

    
243
class ArgJobId(_Argument):
244
  """Job ID argument.
245

246
  """
247

    
248

    
249
class ArgFile(_Argument):
250
  """File path argument.
251

252
  """
253

    
254

    
255
class ArgCommand(_Argument):
256
  """Command argument.
257

258
  """
259

    
260

    
261
class ArgHost(_Argument):
262
  """Host argument.
263

264
  """
265

    
266

    
267
class ArgOs(_Argument):
268
  """OS argument.
269

270
  """
271

    
272

    
273
ARGS_NONE = []
274
ARGS_MANY_INSTANCES = [ArgInstance()]
275
ARGS_MANY_NODES = [ArgNode()]
276
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
277
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
278
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
279

    
280

    
281
def _ExtractTagsObject(opts, args):
282
  """Extract the tag type object.
283

284
  Note that this function will modify its args parameter.
285

286
  """
287
  if not hasattr(opts, "tag_type"):
288
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
289
  kind = opts.tag_type
290
  if kind == constants.TAG_CLUSTER:
291
    retval = kind, kind
292
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
293
    if not args:
294
      raise errors.OpPrereqError("no arguments passed to the command")
295
    name = args.pop(0)
296
    retval = kind, name
297
  else:
298
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
299
  return retval
300

    
301

    
302
def _ExtendTags(opts, args):
303
  """Extend the args if a source file has been given.
304

305
  This function will extend the tags with the contents of the file
306
  passed in the 'tags_source' attribute of the opts parameter. A file
307
  named '-' will be replaced by stdin.
308

309
  """
310
  fname = opts.tags_source
311
  if fname is None:
312
    return
313
  if fname == "-":
314
    new_fh = sys.stdin
315
  else:
316
    new_fh = open(fname, "r")
317
  new_data = []
318
  try:
319
    # we don't use the nice 'new_data = [line.strip() for line in new_fh]'
320
    # because of python bug 1633941
321
    while True:
322
      line = new_fh.readline()
323
      if not line:
324
        break
325
      new_data.append(line.strip())
326
  finally:
327
    new_fh.close()
328
  args.extend(new_data)
329

    
330

    
331
def ListTags(opts, args):
332
  """List the tags on a given object.
333

334
  This is a generic implementation that knows how to deal with all
335
  three cases of tag objects (cluster, node, instance). The opts
336
  argument is expected to contain a tag_type field denoting what
337
  object type we work on.
338

339
  """
340
  kind, name = _ExtractTagsObject(opts, args)
341
  cl = GetClient()
342
  result = cl.QueryTags(kind, name)
343
  result = list(result)
344
  result.sort()
345
  for tag in result:
346
    ToStdout(tag)
347

    
348

    
349
def AddTags(opts, args):
350
  """Add tags on a given object.
351

352
  This is a generic implementation that knows how to deal with all
353
  three cases of tag objects (cluster, node, instance). The opts
354
  argument is expected to contain a tag_type field denoting what
355
  object type we work on.
356

357
  """
358
  kind, name = _ExtractTagsObject(opts, args)
359
  _ExtendTags(opts, args)
360
  if not args:
361
    raise errors.OpPrereqError("No tags to be added")
362
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
363
  SubmitOpCode(op)
364

    
365

    
366
def RemoveTags(opts, args):
367
  """Remove tags from a given object.
368

369
  This is a generic implementation that knows how to deal with all
370
  three cases of tag objects (cluster, node, instance). The opts
371
  argument is expected to contain a tag_type field denoting what
372
  object type we work on.
373

374
  """
375
  kind, name = _ExtractTagsObject(opts, args)
376
  _ExtendTags(opts, args)
377
  if not args:
378
    raise errors.OpPrereqError("No tags to be removed")
379
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
380
  SubmitOpCode(op)
381

    
382

    
383
def check_unit(option, opt, value): # pylint: disable-msg=W0613
384
  """OptParsers custom converter for units.
385

386
  """
387
  try:
388
    return utils.ParseUnit(value)
389
  except errors.UnitParseError, err:
390
    raise OptionValueError("option %s: %s" % (opt, err))
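
# Illustrative note (added for clarity, not part of the original code): with
# the "unit" type registered on CliOption below, an option value such as
# "512M" is converted by utils.ParseUnit into a plain number of mebibytes
# (512 here) before it reaches the command implementation.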
391

    
392

    
393
def _SplitKeyVal(opt, data):
394
  """Convert a KeyVal string into a dict.
395

396
  This function will convert a key=val[,...] string into a dict. Empty
397
  values will be converted specially: keys which have the prefix 'no_'
398
  will have the value=False and the prefix stripped, keys with the '-' prefix
399
  (also stripped) will have value=None, and the others will have value=True.
400

401
  @type opt: string
402
  @param opt: a string holding the option name for which we process the
403
      data, used in building error messages
404
  @type data: string
405
  @param data: a string of the format key=val,key=val,...
406
  @rtype: dict
407
  @return: {key: val, key: val}
408
  @raises errors.ParameterError: if there are duplicate keys
409

410
  """
411
  kv_dict = {}
412
  if data:
413
    for elem in utils.UnescapeAndSplit(data, sep=","):
414
      if "=" in elem:
415
        key, val = elem.split("=", 1)
416
      else:
417
        if elem.startswith(NO_PREFIX):
418
          key, val = elem[len(NO_PREFIX):], False
419
        elif elem.startswith(UN_PREFIX):
420
          key, val = elem[len(UN_PREFIX):], None
421
        else:
422
          key, val = elem, True
423
      if key in kv_dict:
424
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
425
                                    (key, opt))
426
      kv_dict[key] = val
427
  return kv_dict
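
# Example (illustrative, the values are made up): the helper above turns
#   "kernel_path=/vmlinuz,no_acpi,-root_path"
# into
#   {"kernel_path": "/vmlinuz", "acpi": False, "root_path": None}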
428

    
429

    
430
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
431
  """Custom parser for ident:key=val,key=val options.
432

433
  This will store the parsed values as a tuple (ident, {key: val}). As such,
434
  multiple uses of this option via action=append is possible.
435

436
  """
437
  if ":" not in value:
438
    ident, rest = value, ''
439
  else:
440
    ident, rest = value.split(":", 1)
441

    
442
  if ident.startswith(NO_PREFIX):
443
    if rest:
444
      msg = "Cannot pass options when removing parameter groups: %s" % value
445
      raise errors.ParameterError(msg)
446
    retval = (ident[len(NO_PREFIX):], False)
447
  elif ident.startswith(UN_PREFIX):
448
    if rest:
449
      msg = "Cannot pass options when removing parameter groups: %s" % value
450
      raise errors.ParameterError(msg)
451
    retval = (ident[len(UN_PREFIX):], None)
452
  else:
453
    kv_dict = _SplitKeyVal(opt, rest)
454
    retval = (ident, kv_dict)
455
  return retval
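
# Example (illustrative): with the "identkeyval" type, "--net"-style values
# are parsed as follows:
#   "0:link=br0,mac=aa:00:00:11:22:33"  ->  ("0", {"link": "br0",
#                                                  "mac": "aa:00:00:11:22:33"})
#   "no_0"                              ->  ("0", False)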
456

    
457

    
458
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
459
  """Custom parser class for key=val,key=val options.
460

461
  This will store the parsed values as a dict {key: val}.
462

463
  """
464
  return _SplitKeyVal(opt, value)
465

    
466

    
467
def check_bool(option, opt, value): # pylint: disable-msg=W0613
468
  """Custom parser for yes/no options.
469

470
  This will store the parsed value as either True or False.
471

472
  """
473
  value = value.lower()
474
  if value == constants.VALUE_FALSE or value == "no":
475
    return False
476
  elif value == constants.VALUE_TRUE or value == "yes":
477
    return True
478
  else:
479
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
480

    
481

    
482
# completion_suggest is normally a list. Numeric values that don't evaluate
483
# to False are used here to request dynamic completion.
484
(OPT_COMPL_MANY_NODES,
485
 OPT_COMPL_ONE_NODE,
486
 OPT_COMPL_ONE_INSTANCE,
487
 OPT_COMPL_ONE_OS,
488
 OPT_COMPL_ONE_IALLOCATOR,
489
 OPT_COMPL_INST_ADD_NODES) = range(100, 106)
490

    
491
OPT_COMPL_ALL = frozenset([
492
  OPT_COMPL_MANY_NODES,
493
  OPT_COMPL_ONE_NODE,
494
  OPT_COMPL_ONE_INSTANCE,
495
  OPT_COMPL_ONE_OS,
496
  OPT_COMPL_ONE_IALLOCATOR,
497
  OPT_COMPL_INST_ADD_NODES,
498
  ])
499

    
500

    
501
class CliOption(Option):
502
  """Custom option class for optparse.
503

504
  """
505
  ATTRS = Option.ATTRS + [
506
    "completion_suggest",
507
    ]
508
  TYPES = Option.TYPES + (
509
    "identkeyval",
510
    "keyval",
511
    "unit",
512
    "bool",
513
    )
514
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
515
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
516
  TYPE_CHECKER["keyval"] = check_key_val
517
  TYPE_CHECKER["unit"] = check_unit
518
  TYPE_CHECKER["bool"] = check_bool
519

    
520

    
521
# optparse.py sets make_option, so we do it for our own option class, too
522
cli_option = CliOption
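
# Example (illustrative, the option and values below are hypothetical):
#   _EXAMPLE_OPT = cli_option("-X", "--example", dest="example",
#                             type="keyval", default={})
#   parser = OptionParser(option_list=[_EXAMPLE_OPT])
#   options, args = parser.parse_args(["-X", "memory=512,no_auto_balance"])
#   # options.example == {"memory": "512", "auto_balance": False}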
523

    
524

    
525
_YORNO = "yes|no"
526

    
527
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
528
                       help="Increase debugging level")
529

    
530
NOHDR_OPT = cli_option("--no-headers", default=False,
531
                       action="store_true", dest="no_headers",
532
                       help="Don't display column headers")
533

    
534
SEP_OPT = cli_option("--separator", default=None,
535
                     action="store", dest="separator",
536
                     help=("Separator between output fields"
537
                           " (defaults to one space)"))
538

    
539
USEUNITS_OPT = cli_option("--units", default=None,
540
                          dest="units", choices=('h', 'm', 'g', 't'),
541
                          help="Specify units for output (one of hmgt)")
542

    
543
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
544
                        type="string", metavar="FIELDS",
545
                        help="Comma separated list of output fields")
546

    
547
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
548
                       default=False, help="Force the operation")
549

    
550
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
551
                         default=False, help="Do not require confirmation")
552

    
553
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
554
                         default=None, help="File with tag names")
555

    
556
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
557
                        default=False, action="store_true",
558
                        help=("Submit the job and return the job ID, but"
559
                              " don't wait for the job to finish"))
560

    
561
SYNC_OPT = cli_option("--sync", dest="do_locking",
562
                      default=False, action="store_true",
563
                      help=("Grab locks while doing the queries"
564
                            " in order to ensure more consistent results"))
565

    
566
_DRY_RUN_OPT = cli_option("--dry-run", default=False,
567
                          action="store_true",
568
                          help=("Do not execute the operation, just run the"
569
                                " check steps and verify it it could be"
570
                                " executed"))
571

    
572
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
573
                         action="store_true",
574
                         help="Increase the verbosity of the operation")
575

    
576
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
577
                              action="store_true", dest="simulate_errors",
578
                              help="Debugging option that makes the operation"
579
                              " treat most runtime checks as failed")
580

    
581
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
582
                        default=True, action="store_false",
583
                        help="Don't wait for sync (DANGEROUS!)")
584

    
585
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
586
                               help="Custom disk setup (diskless, file,"
587
                               " plain or drbd)",
588
                               default=None, metavar="TEMPL",
589
                               choices=list(constants.DISK_TEMPLATES))
590

    
591
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
592
                        help="Do not create any network cards for"
593
                        " the instance")
594

    
595
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
596
                               help="Relative path under default cluster-wide"
597
                               " file storage dir to store file-based disks",
598
                               default=None, metavar="<DIR>")
599

    
600
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
601
                                  help="Driver to use for image files",
602
                                  default="loop", metavar="<DRIVER>",
603
                                  choices=list(constants.FILE_DRIVER))
604

    
605
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
606
                            help="Select nodes for the instance automatically"
607
                            " using the <NAME> iallocator plugin",
608
                            default=None, type="string",
609
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
610

    
611
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
612
                    metavar="<os>",
613
                    completion_suggest=OPT_COMPL_ONE_OS)
614

    
615
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
616
                               action="store_true", default=False,
617
                               help="Force an unknown variant")
618

    
619
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
620
                            action="store_true", default=False,
621
                            help="Do not install the OS (will"
622
                            " enable no-start)")
623

    
624
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
625
                         type="keyval", default={},
626
                         help="Backend parameters")
627

    
628
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
629
                         default={}, dest="hvparams",
630
                         help="Hypervisor parameters")
631

    
632
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
633
                            help="Hypervisor and hypervisor options, in the"
634
                            " format hypervisor:option=value,option=value,...",
635
                            default=None, type="identkeyval")
636

    
637
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
638
                        help="Hypervisor and hypervisor options, in the"
639
                        " format hypervisor:option=value,option=value,...",
640
                        default=[], action="append", type="identkeyval")
641

    
642
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
643
                           action="store_false",
644
                           help="Don't check that the instance's IP"
645
                           " is alive")
646

    
647
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
648
                             default=True, action="store_false",
649
                             help="Don't check that the instance's name"
650
                             " is resolvable")
651

    
652
NET_OPT = cli_option("--net",
653
                     help="NIC parameters", default=[],
654
                     dest="nics", action="append", type="identkeyval")
655

    
656
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
657
                      dest="disks", action="append", type="identkeyval")
658

    
659
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
660
                         help="Comma-separated list of disks"
661
                         " indices to act on (e.g. 0,2) (optional,"
662
                         " defaults to all disks)")
663

    
664
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
665
                         help="Enforces a single-disk configuration using the"
666
                         " given disk size, in MiB unless a suffix is used",
667
                         default=None, type="unit", metavar="<size>")
668

    
669
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
670
                                dest="ignore_consistency",
671
                                action="store_true", default=False,
672
                                help="Ignore the consistency of the disks on"
673
                                " the secondary")
674

    
675
NONLIVE_OPT = cli_option("--non-live", dest="live",
676
                         default=True, action="store_false",
677
                         help="Do a non-live migration (this usually means"
678
                         " freeze the instance, save the state, transfer and"
679
                         " only then resume running on the secondary node)")
680

    
681
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
682
                                help="Target node and optional secondary node",
683
                                metavar="<pnode>[:<snode>]",
684
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
685

    
686
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
687
                           action="append", metavar="<node>",
688
                           help="Use only this node (can be used multiple"
689
                           " times, if not given defaults to all nodes)",
690
                           completion_suggest=OPT_COMPL_ONE_NODE)
691

    
692
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
693
                             metavar="<node>",
694
                             completion_suggest=OPT_COMPL_ONE_NODE)
695

    
696
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
697
                         action="store_false",
698
                         help="Don't start the instance after creation")
699

    
700
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
701
                         action="store_true", default=False,
702
                         help="Show command instead of executing it")
703

    
704
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
705
                         default=False, action="store_true",
706
                         help="Instead of performing the migration, try to"
707
                         " recover from a failed cleanup. This is safe"
708
                         " to run even if the instance is healthy, but it"
709
                         " will create extra replication traffic and "
710
                         " disrupt briefly the replication (like during the"
711
                         " migration")
712

    
713
STATIC_OPT = cli_option("-s", "--static", dest="static",
714
                        action="store_true", default=False,
715
                        help="Only show configuration data, not runtime data")
716

    
717
ALL_OPT = cli_option("--all", dest="show_all",
718
                     default=False, action="store_true",
719
                     help="Show info on all instances on the cluster."
720
                     " This can take a long time to run, use wisely")
721

    
722
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
723
                           action="store_true", default=False,
724
                           help="Interactive OS reinstall, lists available"
725
                           " OS templates for selection")
726

    
727
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
728
                                 action="store_true", default=False,
729
                                 help="Remove the instance from the cluster"
730
                                 " configuration even if there are failures"
731
                                 " during the removal process")
732

    
733
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
734
                               help="Specifies the new secondary node",
735
                               metavar="NODE", default=None,
736
                               completion_suggest=OPT_COMPL_ONE_NODE)
737

    
738
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
739
                            default=False, action="store_true",
740
                            help="Replace the disk(s) on the primary"
741
                            " node (only for the drbd template)")
742

    
743
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
744
                              default=False, action="store_true",
745
                              help="Replace the disk(s) on the secondary"
746
                              " node (only for the drbd template)")
747

    
748
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
749
                              default=False, action="store_true",
750
                              help="Lock all nodes and auto-promote as needed"
751
                              " to MC status")
752

    
753
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
754
                              default=False, action="store_true",
755
                              help="Automatically replace faulty disks"
756
                              " (only for the drbd template)")
757

    
758
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
759
                             default=False, action="store_true",
760
                             help="Ignore current recorded size"
761
                             " (useful for forcing activation when"
762
                             " the recorded size is wrong)")
763

    
764
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
765
                          metavar="<node>",
766
                          completion_suggest=OPT_COMPL_ONE_NODE)
767

    
768
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
769
                         metavar="<dir>")
770

    
771
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
772
                              help="Specify the secondary ip for the node",
773
                              metavar="ADDRESS", default=None)
774

    
775
READD_OPT = cli_option("--readd", dest="readd",
776
                       default=False, action="store_true",
777
                       help="Readd old node after replacing it")
778

    
779
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
780
                                default=True, action="store_false",
781
                                help="Disable SSH key fingerprint checking")
782

    
783

    
784
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
785
                    type="bool", default=None, metavar=_YORNO,
786
                    help="Set the master_candidate flag on the node")
787

    
788
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
789
                         type="bool", default=None,
790
                         help="Set the offline flag on the node")
791

    
792
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
793
                         type="bool", default=None,
794
                         help="Set the drained flag on the node")
795

    
796
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
797
                             type="bool", default=None, metavar=_YORNO,
798
                             help="Set the allocatable flag on a volume")
799

    
800
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
801
                               help="Disable support for lvm based instances"
802
                               " (cluster-wide)",
803
                               action="store_false", default=True)
804

    
805
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
806
                            dest="enabled_hypervisors",
807
                            help="Comma-separated list of hypervisors",
808
                            type="string", default=None)
809

    
810
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
811
                            type="keyval", default={},
812
                            help="NIC parameters")
813

    
814
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
815
                         dest="candidate_pool_size", type="int",
816
                         help="Set the candidate pool size")
817

    
818
VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
819
                         help="Enables LVM and specifies the volume group"
820
                         " name (cluster-wide) for disk allocation [xenvg]",
821
                         metavar="VG", default=None)
822

    
823
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
824
                          help="Destroy cluster", action="store_true")
825

    
826
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
827
                          help="Skip node agreement check (dangerous)",
828
                          action="store_true", default=False)
829

    
830
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
831
                            help="Specify the mac prefix for the instance IP"
832
                            " addresses, in the format XX:XX:XX",
833
                            metavar="PREFIX",
834
                            default=None)
835

    
836
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
837
                               help="Specify the node interface (cluster-wide)"
838
                               " on which the master IP address will be added "
839
                               " [%s]" % constants.DEFAULT_BRIDGE,
840
                               metavar="NETDEV",
841
                               default=constants.DEFAULT_BRIDGE)
842

    
843

    
844
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
845
                                help="Specify the default directory (cluster-"
846
                                "wide) for storing the file-based disks [%s]" %
847
                                constants.DEFAULT_FILE_STORAGE_DIR,
848
                                metavar="DIR",
849
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
850

    
851
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
852
                                   help="Don't modify /etc/hosts",
853
                                   action="store_false", default=True)
854

    
855
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
856
                                    help="Don't initialize SSH keys",
857
                                    action="store_false", default=True)
858

    
859
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
860
                             help="Enable parseable error messages",
861
                             action="store_true", default=False)
862

    
863
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
864
                          help="Skip N+1 memory redundancy tests",
865
                          action="store_true", default=False)
866

    
867
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
868
                             help="Type of reboot: soft/hard/full",
869
                             default=constants.INSTANCE_REBOOT_HARD,
870
                             metavar="<REBOOT>",
871
                             choices=list(constants.REBOOT_TYPES))
872

    
873
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
874
                                    dest="ignore_secondaries",
875
                                    default=False, action="store_true",
876
                                    help="Ignore errors from secondaries")
877

    
878
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
879
                            action="store_false", default=True,
880
                            help="Don't shutdown the instance (unsafe)")
881

    
882
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
883
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
884
                         help="Maximum time to wait")
885

    
886
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
887
                         dest="shutdown_timeout", type="int",
888
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
889
                         help="Maximum time to wait for instance shutdown")
890

    
891
EARLY_RELEASE_OPT = cli_option("--early-release",
892
                               dest="early_release", default=False,
893
                               action="store_true",
894
                               help="Release the locks on the secondary"
895
                               " node(s) early")
896

    
897
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
898
                                  dest="new_cluster_cert",
899
                                  default=False, action="store_true",
900
                                  help="Generate a new cluster certificate")
901

    
902
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
903
                           default=None,
904
                           help="File containing new RAPI certificate")
905

    
906
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
907
                               default=None, action="store_true",
908
                               help=("Generate a new self-signed RAPI"
909
                                     " certificate"))
910

    
911
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
912
                                    dest="new_confd_hmac_key",
913
                                    default=False, action="store_true",
914
                                    help=("Create a new HMAC key for %s" %
915
                                          constants.CONFD))
916

    
917
USE_REPL_NET_OPT = cli_option("--use-replication-network",
918
                              dest="use_replication_network",
919
                              help="Whether to use the replication network"
920
                              " for talking to the nodes",
921
                              action="store_true", default=False)
922

    
923
MAINTAIN_NODE_HEALTH_OPT = \
924
    cli_option("--maintain-node-health", dest="maintain_node_health",
925
               metavar=_YORNO, default=None, type="bool",
926
               help="Configure the cluster to automatically maintain node"
927
               " health, by shutting down unknown instances, shutting down"
928
               " unknown DRBD devices, etc.")
929

    
930
IDENTIFY_DEFAULTS_OPT = \
931
    cli_option("--identify-defaults", dest="identify_defaults",
932
               default=False, action="store_true",
933
               help="Identify which saved instance parameters are equal to"
934
               " the current cluster defaults and set them as such, instead"
935
               " of marking them as overridden")
936

    
937
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
938
                         action="store", dest="uid_pool",
939
                         help=("A list of user-ids or user-id"
940
                               " ranges separated by commas"))
941

    
942
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
943
                          action="store", dest="add_uids",
944
                          help=("A list of user-ids or user-id"
945
                                " ranges separated by commas, to be"
946
                                " added to the user-id pool"))
947

    
948
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
949
                             action="store", dest="remove_uids",
950
                             help=("A list of user-ids or user-id"
951
                                   " ranges separated by commas, to be"
952
                                   " removed from the user-id pool"))
953

    
954
ROMAN_OPT = cli_option("--roman",
955
                       dest="roman_integers", default=False,
956
                       action="store_true",
957
                       help="Use roman numbers for positive integers")
958

    
959

    
960

    
961
def _ParseArgs(argv, commands, aliases):
962
  """Parser for the command line arguments.
963

964
  This function parses the arguments and returns the function which
965
  must be executed together with its (modified) arguments.
966

967
  @param argv: the command line
968
  @param commands: dictionary with special contents, see the design
969
      doc for cmdline handling
970
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
971

972
  """
973
  if len(argv) == 0:
974
    binary = "<command>"
975
  else:
976
    binary = argv[0].split("/")[-1]
977

    
978
  if len(argv) > 1 and argv[1] == "--version":
979
    ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
980
    # Quit right away. That way we don't have to care about this special
981
    # argument. optparse.py does the same.
982
    sys.exit(0)
983

    
984
  if len(argv) < 2 or not (argv[1] in commands or
985
                           argv[1] in aliases):
986
    # let's do a nice thing
987
    sortedcmds = commands.keys()
988
    sortedcmds.sort()
989

    
990
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
991
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
992
    ToStdout("")
993

    
994
    # compute the max line length for cmd + usage
995
    mlen = max([len(" %s" % cmd) for cmd in commands])
996
    mlen = min(60, mlen) # should not get here...
997

    
998
    # and format a nice command list
999
    ToStdout("Commands:")
1000
    for cmd in sortedcmds:
1001
      cmdstr = " %s" % (cmd,)
1002
      help_text = commands[cmd][4]
1003
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1004
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1005
      for line in help_lines:
1006
        ToStdout("%-*s   %s", mlen, "", line)
1007

    
1008
    ToStdout("")
1009

    
1010
    return None, None, None
1011

    
1012
  # get command, unalias it, and look it up in commands
1013
  cmd = argv.pop(1)
1014
  if cmd in aliases:
1015
    if cmd in commands:
1016
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1017
                                   " command" % cmd)
1018

    
1019
    if aliases[cmd] not in commands:
1020
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1021
                                   " command '%s'" % (cmd, aliases[cmd]))
1022

    
1023
    cmd = aliases[cmd]
1024

    
1025
  func, args_def, parser_opts, usage, description = commands[cmd]
1026
  parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
1027
                        description=description,
1028
                        formatter=TitledHelpFormatter(),
1029
                        usage="%%prog %s %s" % (cmd, usage))
1030
  parser.disable_interspersed_args()
1031
  options, args = parser.parse_args()
1032

    
1033
  if not _CheckArguments(cmd, args_def, args):
1034
    return None, None, None
1035

    
1036
  return func, options, args
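
# Example (illustrative, the entry below is hypothetical): the 'commands'
# dictionary passed to _ParseArgs maps each command name to a 5-tuple of
# (function, argument definition, options, usage, description), e.g.:
#   commands = {
#     "list": (ListInstances, ARGS_MANY_INSTANCES, [NOHDR_OPT, SEP_OPT],
#              "[<instance>...]", "Lists the instances and their status"),
#     }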
1037

    
1038

    
1039
def _CheckArguments(cmd, args_def, args):
1040
  """Verifies the arguments using the argument definition.
1041

1042
  Algorithm:
1043

1044
    1. Abort with error if values specified by user but none expected.
1045

1046
    1. For each argument in definition
1047

1048
      1. Keep running count of minimum number of values (min_count)
1049
      1. Keep running count of maximum number of values (max_count)
1050
      1. If it has an unlimited number of values
1051

1052
        1. Abort with error if it's not the last argument in the definition
1053

1054
    1. If last argument has limited number of values
1055

1056
      1. Abort with error if number of values doesn't match or is too large
1057

1058
    1. Abort with error if user didn't pass enough values (min_count)
1059

1060
  """
1061
  if args and not args_def:
1062
    ToStderr("Error: Command %s expects no arguments", cmd)
1063
    return False
1064

    
1065
  min_count = None
1066
  max_count = None
1067
  check_max = None
1068

    
1069
  last_idx = len(args_def) - 1
1070

    
1071
  for idx, arg in enumerate(args_def):
1072
    if min_count is None:
1073
      min_count = arg.min
1074
    elif arg.min is not None:
1075
      min_count += arg.min
1076

    
1077
    if max_count is None:
1078
      max_count = arg.max
1079
    elif arg.max is not None:
1080
      max_count += arg.max
1081

    
1082
    if idx == last_idx:
1083
      check_max = (arg.max is not None)
1084

    
1085
    elif arg.max is None:
1086
      raise errors.ProgrammerError("Only the last argument can have max=None")
1087

    
1088
  if check_max:
1089
    # Command with exact number of arguments
1090
    if (min_count is not None and max_count is not None and
1091
        min_count == max_count and len(args) != min_count):
1092
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1093
      return False
1094

    
1095
    # Command with limited number of arguments
1096
    if max_count is not None and len(args) > max_count:
1097
      ToStderr("Error: Command %s expects only %d argument(s)",
1098
               cmd, max_count)
1099
      return False
1100

    
1101
  # Command with some required arguments
1102
  if min_count is not None and len(args) < min_count:
1103
    ToStderr("Error: Command %s expects at least %d argument(s)",
1104
             cmd, min_count)
1105
    return False
1106

    
1107
  return True
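
# Worked example (illustrative): for args_def = [ArgNode(min=1, max=1),
# ArgInstance()] the checks above require at least one value (the node) and,
# because the last argument has max=None, accept any number of additional
# values (the instances).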
1108

    
1109

    
1110
def SplitNodeOption(value):
1111
  """Splits the value of a --node option.
1112

1113
  """
1114
  if value and ':' in value:
1115
    return value.split(':', 1)
1116
  else:
1117
    return (value, None)
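
# Example (illustrative):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     -> ("node1.example.com", None)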
1118

    
1119

    
1120
def CalculateOSNames(os_name, os_variants):
1121
  """Calculates all the names an OS can be called, according to its variants.
1122

1123
  @type os_name: string
1124
  @param os_name: base name of the os
1125
  @type os_variants: list or None
1126
  @param os_variants: list of supported variants
1127
  @rtype: list
1128
  @return: list of valid names
1129

1130
  """
1131
  if os_variants:
1132
    return ['%s+%s' % (os_name, v) for v in os_variants]
1133
  else:
1134
    return [os_name]
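
# Example (illustrative):
#   CalculateOSNames("debootstrap", ["squeeze", "lenny"])
#     -> ["debootstrap+squeeze", "debootstrap+lenny"]
#   CalculateOSNames("debootstrap", None)
#     -> ["debootstrap"]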
1135

    
1136

    
1137
def UsesRPC(fn):
1138
  def wrapper(*args, **kwargs):
1139
    rpc.Init()
1140
    try:
1141
      return fn(*args, **kwargs)
1142
    finally:
1143
      rpc.Shutdown()
1144
  return wrapper
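
# Illustrative usage (the decorated function is hypothetical): commands that
# need the node RPC layer initialised and shut down around their execution
# can simply be wrapped with the decorator above:
#   @UsesRPC
#   def InitCluster(opts, args):
#     ...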
1145

    
1146

    
1147
def AskUser(text, choices=None):
1148
  """Ask the user a question.
1149

1150
  @param text: the question to ask
1151

1152
  @param choices: list with elements tuples (input_char, return_value,
1153
      description); if not given, it will default to: [('y', True,
1154
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1155
      note that the '?' char is reserved for help
1156

1157
  @return: one of the return values from the choices list; if input is
1158
      not possible (i.e. not running with a tty), we return the last
1159
      entry from the list
1160

1161
  """
1162
  if choices is None:
1163
    choices = [('y', True, 'Perform the operation'),
1164
               ('n', False, 'Do not perform the operation')]
1165
  if not choices or not isinstance(choices, list):
1166
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1167
  for entry in choices:
1168
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1169
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1170

    
1171
  answer = choices[-1][1]
1172
  new_text = []
1173
  for line in text.splitlines():
1174
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1175
  text = "\n".join(new_text)
1176
  try:
1177
    f = file("/dev/tty", "a+")
1178
  except IOError:
1179
    return answer
1180
  try:
1181
    chars = [entry[0] for entry in choices]
1182
    chars[-1] = "[%s]" % chars[-1]
1183
    chars.append('?')
1184
    maps = dict([(entry[0], entry[1]) for entry in choices])
1185
    while True:
1186
      f.write(text)
1187
      f.write('\n')
1188
      f.write("/".join(chars))
1189
      f.write(": ")
1190
      line = f.readline(2).strip().lower()
1191
      if line in maps:
1192
        answer = maps[line]
1193
        break
1194
      elif line == '?':
1195
        for entry in choices:
1196
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1197
        f.write("\n")
1198
        continue
1199
  finally:
1200
    f.close()
1201
  return answer
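
# Example (illustrative): a confirmation prompt with a custom set of choices
#   choice = AskUser("Destroy instance?",
#                    [("y", True, "Destroy it"),
#                     ("n", False, "Keep it"),
#                     ("a", "all", "Destroy this and all remaining instances")])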
1202

    
1203

    
1204
class JobSubmittedException(Exception):
1205
  """Job was submitted, client should exit.
1206

1207
  This exception has one argument, the ID of the job that was
1208
  submitted. The handler should print this ID.
1209

1210
  This is not an error, just a structured way to exit from clients.
1211

1212
  """
1213

    
1214

    
1215
def SendJob(ops, cl=None):
1216
  """Function to submit an opcode without waiting for the results.
1217

1218
  @type ops: list
1219
  @param ops: list of opcodes
1220
  @type cl: luxi.Client
1221
  @param cl: the luxi client to use for communicating with the master;
1222
             if None, a new client will be created
1223

1224
  """
1225
  if cl is None:
1226
    cl = GetClient()
1227

    
1228
  job_id = cl.SubmitJob(ops)
1229

    
1230
  return job_id
1231

    
1232

    
1233
def GenericPollJob(job_id, cbs, report_cbs):
1234
  """Generic job-polling function.
1235

1236
  @type job_id: number
1237
  @param job_id: Job ID
1238
  @type cbs: Instance of L{JobPollCbBase}
1239
  @param cbs: Data callbacks
1240
  @type report_cbs: Instance of L{JobPollReportCbBase}
1241
  @param report_cbs: Reporting callbacks
1242

1243
  """
1244
  prev_job_info = None
1245
  prev_logmsg_serial = None
1246

    
1247
  status = None
1248

    
1249
  while True:
1250
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1251
                                      prev_logmsg_serial)
1252
    if not result:
1253
      # job not found, go away!
1254
      raise errors.JobLost("Job with id %s lost" % job_id)
1255

    
1256
    if result == constants.JOB_NOTCHANGED:
1257
      report_cbs.ReportNotChanged(job_id, status)
1258

    
1259
      # Wait again
1260
      continue
1261

    
1262
    # Split result, a tuple of (field values, log entries)
1263
    (job_info, log_entries) = result
1264
    (status, ) = job_info
1265

    
1266
    if log_entries:
1267
      for log_entry in log_entries:
1268
        (serial, timestamp, log_type, message) = log_entry
1269
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1270
                                    log_type, message)
1271
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1272

    
1273
    # TODO: Handle canceled and archived jobs
1274
    elif status in (constants.JOB_STATUS_SUCCESS,
1275
                    constants.JOB_STATUS_ERROR,
1276
                    constants.JOB_STATUS_CANCELING,
1277
                    constants.JOB_STATUS_CANCELED):
1278
      break
1279

    
1280
    prev_job_info = job_info
1281

    
1282
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1283
  if not jobs:
1284
    raise errors.JobLost("Job with id %s lost" % job_id)
1285

    
1286
  status, opstatus, result = jobs[0]
1287

    
1288
  if status == constants.JOB_STATUS_SUCCESS:
1289
    return result
1290

    
1291
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1292
    raise errors.OpExecError("Job was canceled")
1293

    
1294
  has_ok = False
1295
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1296
    if status == constants.OP_STATUS_SUCCESS:
1297
      has_ok = True
1298
    elif status == constants.OP_STATUS_ERROR:
1299
      errors.MaybeRaise(msg)
1300

    
1301
      if has_ok:
1302
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1303
                                 (idx, msg))
1304

    
1305
      raise errors.OpExecError(str(msg))
1306

    
1307
  # default failure mode
1308
  raise errors.OpExecError(result)
1309

    
1310

    
1311
class JobPollCbBase:
1312
  """Base class for L{GenericPollJob} callbacks.
1313

1314
  """
1315
  def __init__(self):
1316
    """Initializes this class.
1317

1318
    """
1319

    
1320
  def WaitForJobChangeOnce(self, job_id, fields,
1321
                           prev_job_info, prev_log_serial):
1322
    """Waits for changes on a job.
1323

1324
    """
1325
    raise NotImplementedError()
1326

    
1327
  def QueryJobs(self, job_ids, fields):
1328
    """Returns the selected fields for the selected job IDs.
1329

1330
    @type job_ids: list of numbers
1331
    @param job_ids: Job IDs
1332
    @type fields: list of strings
1333
    @param fields: Fields
1334

1335
    """
1336
    raise NotImplementedError()
1337

    
1338

    
1339
class JobPollReportCbBase:
1340
  """Base class for L{GenericPollJob} reporting callbacks.
1341

1342
  """
1343
  def __init__(self):
1344
    """Initializes this class.
1345

1346
    """
1347

    
1348
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1349
    """Handles a log message.
1350

1351
    """
1352
    raise NotImplementedError()
1353

    
1354
  def ReportNotChanged(self, job_id, status):
1355
    """Called for if a job hasn't changed in a while.
1356

1357
    @type job_id: number
1358
    @param job_id: Job ID
1359
    @type status: string or None
1360
    @param status: Job status if available
1361

1362
    """
1363
    raise NotImplementedError()
1364

    
1365

    
1366
class _LuxiJobPollCb(JobPollCbBase):
1367
  def __init__(self, cl):
1368
    """Initializes this class.
1369

1370
    """
1371
    JobPollCbBase.__init__(self)
1372
    self.cl = cl
1373

    
1374
  def WaitForJobChangeOnce(self, job_id, fields,
1375
                           prev_job_info, prev_log_serial):
1376
    """Waits for changes on a job.
1377

1378
    """
1379
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1380
                                        prev_job_info, prev_log_serial)
1381

    
1382
  def QueryJobs(self, job_ids, fields):
1383
    """Returns the selected fields for the selected job IDs.
1384

1385
    """
1386
    return self.cl.QueryJobs(job_ids, fields)
1387

    
1388

    
1389
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1390
  def __init__(self, feedback_fn):
1391
    """Initializes this class.
1392

1393
    """
1394
    JobPollReportCbBase.__init__(self)
1395

    
1396
    self.feedback_fn = feedback_fn
1397

    
1398
    assert callable(feedback_fn)
1399

    
1400
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1401
    """Handles a log message.
1402

1403
    """
1404
    self.feedback_fn((timestamp, log_type, log_msg))
1405

    
1406
  def ReportNotChanged(self, job_id, status):
1407
    """Called if a job hasn't changed in a while.
1408

1409
    """
1410
    # Ignore
1411

    
1412

    
1413
class StdioJobPollReportCb(JobPollReportCbBase):
1414
  def __init__(self):
1415
    """Initializes this class.
1416

1417
    """
1418
    JobPollReportCbBase.__init__(self)
1419

    
1420
    self.notified_queued = False
1421
    self.notified_waitlock = False
1422

    
1423
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1424
    """Handles a log message.
1425

1426
    """
1427
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1428
             utils.SafeEncode(log_msg))
1429

    
1430
  def ReportNotChanged(self, job_id, status):
1431
    """Called if a job hasn't changed in a while.
1432

1433
    """
1434
    if status is None:
1435
      return
1436

    
1437
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1438
      ToStderr("Job %s is waiting in queue", job_id)
1439
      self.notified_queued = True
1440

    
1441
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1442
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1443
      self.notified_waitlock = True
1444

    
1445

    
1446
def PollJob(job_id, cl=None, feedback_fn=None):
1447
  """Function to poll for the result of a job.
1448

1449
  @type job_id: job identifier
1450
  @param job_id: the job to poll for results
1451
  @type cl: luxi.Client
1452
  @param cl: the luxi client to use for communicating with the master;
1453
             if None, a new client will be created
1454

1455
  """
1456
  if cl is None:
1457
    cl = GetClient()
1458

    
1459
  if feedback_fn:
1460
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
1461
  else:
1462
    reporter = StdioJobPollReportCb()
1463

    
1464
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
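
# Example (illustrative, 'op' stands for a previously built opcode): submit a
# job without waiting and pick up its results later:
#   job_id = SendJob([op])
#   results = PollJob(job_id)   # blocks until the job finishes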
1465

    
1466

    
1467
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None):
1468
  """Legacy function to submit an opcode.
1469

1470
  This is just a simple wrapper around job submission (L{SendJob}) and
1471
  polling (L{PollJob}). It should be extended to better handle feedback and
1472
  interaction functions.
1473

1474
  """
1475
  if cl is None:
1476
    cl = GetClient()
1477

    
1478
  SetGenericOpcodeOpts([op], opts)
1479

    
1480
  job_id = SendJob([op], cl)
1481

    
1482
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)
1483

    
1484
  return op_results[0]
1485

    
1486

    
1487
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1488
  """Wrapper around SubmitOpCode or SendJob.
1489

1490
  This function will decide, based on the 'opts' parameter, whether to
1491
  submit and wait for the result of the opcode (and return it), or
1492
  whether to just send the job and print its identifier. It is used in
1493
  order to simplify the implementation of the '--submit' option.
1494

1495
  It will also process the opcodes if we're sending them via SendJob
1496
  (otherwise SubmitOpCode does it).
1497

1498
  """
1499
  if opts and opts.submit_only:
1500
    job = [op]
1501
    SetGenericOpcodeOpts(job, opts)
1502
    job_id = SendJob(job, cl=cl)
1503
    raise JobSubmittedException(job_id)
1504
  else:
1505
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
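# Illustrative sketch, not part of the original module: a command function
# normally just builds its opcode and hands it to SubmitOrSend; when the user
# passed '--submit', the JobSubmittedException raised here is converted by
# FormatError/GenericMain into a printed job ID and a zero exit code.  The
# opcode below is a placeholder.
#
#   def ExampleCommand(opts, args):       # hypothetical gnt-* command function
#     op = opcodes.OpCreateInstance(...)  # placeholder opcode built from opts/args
#     SubmitOrSend(op, opts)
#     return 0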
1506

    
1507

    
1508
def SetGenericOpcodeOpts(opcode_list, options):
1509
  """Processor for generic options.
1510

1511
  This function updates the given opcodes based on generic command
1512
  line options (like debug, dry-run, etc.).
1513

1514
  @param opcode_list: list of opcodes
1515
  @param options: command line options or None
1516
  @return: None (in-place modification)
1517

1518
  """
1519
  if not options:
1520
    return
1521
  for op in opcode_list:
1522
    op.dry_run = options.dry_run
1523
    op.debug_level = options.debug
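# Illustrative sketch, not part of the original module: the 'options' object
# only needs 'dry_run' and 'debug' attributes, as provided by the generic
# command line parser.
#
#   SetGenericOpcodeOpts([op1, op2], options)  # copies options.dry_run and
#                                              # options.debug onto both opcodes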
1524

    
1525

    
1526
def GetClient():
1527
  # TODO: Cache object?
1528
  try:
1529
    client = luxi.Client()
1530
  except luxi.NoMasterError:
1531
    ss = ssconf.SimpleStore()
1532

    
1533
    # Try to read ssconf file
1534
    try:
1535
      ss.GetMasterNode()
1536
    except errors.ConfigurationError:
1537
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1538
                                 " not part of a cluster")
1539

    
1540
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1541
    if master != myself:
1542
      raise errors.OpPrereqError("This is not the master node, please connect"
1543
                                 " to node '%s' and rerun the command" %
1544
                                 master)
1545
    raise
1546
  return client
1547

    
1548

    
1549
def FormatError(err):
1550
  """Return a formatted error message for a given error.
1551

1552
  This function takes an exception instance and returns a tuple
1553
  consisting of two values: first, the recommended exit code, and
1554
  second, a string describing the error message (not
1555
  newline-terminated).
1556

1557
  """
1558
  retcode = 1
1559
  obuf = StringIO()
1560
  msg = str(err)
1561
  if isinstance(err, errors.ConfigurationError):
1562
    txt = "Corrupt configuration file: %s" % msg
1563
    logging.error(txt)
1564
    obuf.write(txt + "\n")
1565
    obuf.write("Aborting.")
1566
    retcode = 2
1567
  elif isinstance(err, errors.HooksAbort):
1568
    obuf.write("Failure: hooks execution failed:\n")
1569
    for node, script, out in err.args[0]:
1570
      if out:
1571
        obuf.write("  node: %s, script: %s, output: %s\n" %
1572
                   (node, script, out))
1573
      else:
1574
        obuf.write("  node: %s, script: %s (no output)\n" %
1575
                   (node, script))
1576
  elif isinstance(err, errors.HooksFailure):
1577
    obuf.write("Failure: hooks general failure: %s" % msg)
1578
  elif isinstance(err, errors.ResolverError):
1579
    this_host = utils.HostInfo.SysName()
1580
    if err.args[0] == this_host:
1581
      msg = "Failure: can't resolve my own hostname ('%s')"
1582
    else:
1583
      msg = "Failure: can't resolve hostname '%s'"
1584
    obuf.write(msg % err.args[0])
1585
  elif isinstance(err, errors.OpPrereqError):
1586
    if len(err.args) == 2:
1587
      obuf.write("Failure: prerequisites not met for this"
1588
               " operation:\nerror type: %s, error details:\n%s" %
1589
                 (err.args[1], err.args[0]))
1590
    else:
1591
      obuf.write("Failure: prerequisites not met for this"
1592
                 " operation:\n%s" % msg)
1593
  elif isinstance(err, errors.OpExecError):
1594
    obuf.write("Failure: command execution error:\n%s" % msg)
1595
  elif isinstance(err, errors.TagError):
1596
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1597
  elif isinstance(err, errors.JobQueueDrainError):
1598
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1599
               " accept new requests\n")
1600
  elif isinstance(err, errors.JobQueueFull):
1601
    obuf.write("Failure: the job queue is full and doesn't accept new"
1602
               " job submissions until old jobs are archived\n")
1603
  elif isinstance(err, errors.TypeEnforcementError):
1604
    obuf.write("Parameter Error: %s" % msg)
1605
  elif isinstance(err, errors.ParameterError):
1606
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1607
  elif isinstance(err, errors.GenericError):
1608
    obuf.write("Unhandled Ganeti error: %s" % msg)
1609
  elif isinstance(err, luxi.NoMasterError):
1610
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1611
               " and listening for connections?")
1612
  elif isinstance(err, luxi.TimeoutError):
1613
    obuf.write("Timeout while talking to the master daemon. Error:\n"
1614
               "%s" % msg)
1615
  elif isinstance(err, luxi.ProtocolError):
1616
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1617
               "%s" % msg)
1618
  elif isinstance(err, JobSubmittedException):
1619
    obuf.write("JobID: %s\n" % err.args[0])
1620
    retcode = 0
1621
  else:
1622
    obuf.write("Unhandled exception: %s" % msg)
1623
  return retcode, obuf.getvalue().rstrip('\n')
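# Illustrative sketch, not part of the original module: callers typically wrap
# an operation, format any Ganeti error and print it, mirroring what
# GenericMain does below ('op' is a placeholder opcode).
#
#   try:
#     result = SubmitOpCode(op)
#   except (errors.GenericError, luxi.ProtocolError), err:
#     retcode, err_msg = FormatError(err)
#     ToStderr(err_msg)
#     sys.exit(retcode)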
1624

    
1625

    
1626
def GenericMain(commands, override=None, aliases=None):
1627
  """Generic main function for all the gnt-* commands.
1628

1629
  Arguments:
1630
    - commands: a dictionary with a special structure, see the design doc
1631
                for command line handling.
1632
    - override: if not None, we expect a dictionary with keys that will
1633
                override command line options; this can be used to pass
1634
                options from the scripts to generic functions
1635
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1636

1637
  """
1638
  # save the program name and the entire command line for later logging
1639
  if sys.argv:
1640
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1641
    if len(sys.argv) >= 2:
1642
      binary += " " + sys.argv[1]
1643
      old_cmdline = " ".join(sys.argv[2:])
1644
    else:
1645
      old_cmdline = ""
1646
  else:
1647
    binary = "<unknown program>"
1648
    old_cmdline = ""
1649

    
1650
  if aliases is None:
1651
    aliases = {}
1652

    
1653
  try:
1654
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1655
  except errors.ParameterError, err:
1656
    result, err_msg = FormatError(err)
1657
    ToStderr(err_msg)
1658
    return 1
1659

    
1660
  if func is None: # parse error
1661
    return 1
1662

    
1663
  if override is not None:
1664
    for key, val in override.iteritems():
1665
      setattr(options, key, val)
1666

    
1667
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1668
                     stderr_logging=True, program=binary)
1669

    
1670
  if old_cmdline:
1671
    logging.info("run with arguments '%s'", old_cmdline)
1672
  else:
1673
    logging.info("run with no arguments")
1674

    
1675
  try:
1676
    result = func(options, args)
1677
  except (errors.GenericError, luxi.ProtocolError,
1678
          JobSubmittedException), err:
1679
    result, err_msg = FormatError(err)
1680
    logging.exception("Error during command processing")
1681
    ToStderr(err_msg)
1682

    
1683
  return result
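# Illustrative sketch, not part of the original module: a gnt-* script builds
# a 'commands' dictionary and passes it to GenericMain.  The exact tuple
# layout is defined by _ParseArgs and the command line design doc; the usual
# (function, arguments, options, usage, description) form is assumed here,
# and ListSomething/ARGS_NONE stand in for a real command function and
# argument definition.
#
#   commands = {
#     "list": (ListSomething, ARGS_NONE, [NOHDR_OPT, SEP_OPT],
#              "", "Lists the objects"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))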
1684

    
1685

    
1686
def GenericInstanceCreate(mode, opts, args):
1687
  """Add an instance to the cluster via either creation or import.
1688

1689
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1690
  @param opts: the command line options selected by the user
1691
  @type args: list
1692
  @param args: should contain only one element, the new instance name
1693
  @rtype: int
1694
  @return: the desired exit code
1695

1696
  """
1697
  instance = args[0]
1698

    
1699
  (pnode, snode) = SplitNodeOption(opts.node)
1700

    
1701
  hypervisor = None
1702
  hvparams = {}
1703
  if opts.hypervisor:
1704
    hypervisor, hvparams = opts.hypervisor
1705

    
1706
  if opts.nics:
1707
    try:
1708
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
1709
    except ValueError, err:
1710
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1711
    nics = [{}] * nic_max
1712
    for nidx, ndict in opts.nics:
1713
      nidx = int(nidx)
1714
      if not isinstance(ndict, dict):
1715
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
1716
        raise errors.OpPrereqError(msg)
1717
      nics[nidx] = ndict
1718
  elif opts.no_nics:
1719
    # no nics
1720
    nics = []
1721
  elif mode == constants.INSTANCE_CREATE:
1722
    # default of one nic, all auto
1723
    nics = [{}]
1724
  else:
1725
    # mode == import
1726
    nics = []
1727

    
1728
  if opts.disk_template == constants.DT_DISKLESS:
1729
    if opts.disks or opts.sd_size is not None:
1730
      raise errors.OpPrereqError("Diskless instance but disk"
1731
                                 " information passed")
1732
    disks = []
1733
  else:
1734
    if (not opts.disks and not opts.sd_size
1735
        and mode == constants.INSTANCE_CREATE):
1736
      raise errors.OpPrereqError("No disk information specified")
1737
    if opts.disks and opts.sd_size is not None:
1738
      raise errors.OpPrereqError("Please use either the '--disk' or"
1739
                                 " '-s' option")
1740
    if opts.sd_size is not None:
1741
      opts.disks = [(0, {"size": opts.sd_size})]
1742

    
1743
    if opts.disks:
1744
      try:
1745
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1746
      except ValueError, err:
1747
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
1748
      disks = [{}] * disk_max
1749
    else:
1750
      disks = []
1751
    for didx, ddict in opts.disks:
1752
      didx = int(didx)
1753
      if not isinstance(ddict, dict):
1754
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1755
        raise errors.OpPrereqError(msg)
1756
      elif "size" in ddict:
1757
        if "adopt" in ddict:
1758
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
1759
                                     " (disk %d)" % didx)
1760
        try:
1761
          ddict["size"] = utils.ParseUnit(ddict["size"])
1762
        except ValueError, err:
1763
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
1764
                                     (didx, err))
1765
      elif "adopt" in ddict:
1766
        if mode == constants.INSTANCE_IMPORT:
1767
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
1768
                                     " import")
1769
        ddict["size"] = 0
1770
      else:
1771
        raise errors.OpPrereqError("Missing size or adoption source for"
1772
                                   " disk %d" % didx)
1773
      disks[didx] = ddict
1774

    
1775
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
1776
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
1777

    
1778
  if mode == constants.INSTANCE_CREATE:
1779
    start = opts.start
1780
    os_type = opts.os
1781
    src_node = None
1782
    src_path = None
1783
    no_install = opts.no_install
1784
    identify_defaults = False
1785
  elif mode == constants.INSTANCE_IMPORT:
1786
    start = False
1787
    os_type = None
1788
    src_node = opts.src_node
1789
    src_path = opts.src_dir
1790
    no_install = None
1791
    identify_defaults = opts.identify_defaults
1792
  else:
1793
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
1794

    
1795
  op = opcodes.OpCreateInstance(instance_name=instance,
1796
                                disks=disks,
1797
                                disk_template=opts.disk_template,
1798
                                nics=nics,
1799
                                pnode=pnode, snode=snode,
1800
                                ip_check=opts.ip_check,
1801
                                name_check=opts.name_check,
1802
                                wait_for_sync=opts.wait_for_sync,
1803
                                file_storage_dir=opts.file_storage_dir,
1804
                                file_driver=opts.file_driver,
1805
                                iallocator=opts.iallocator,
1806
                                hypervisor=hypervisor,
1807
                                hvparams=hvparams,
1808
                                beparams=opts.beparams,
1809
                                mode=mode,
1810
                                start=start,
1811
                                os_type=os_type,
1812
                                src_node=src_node,
1813
                                src_path=src_path,
1814
                                no_install=no_install,
1815
                                identify_defaults=identify_defaults)
1816

    
1817
  SubmitOrSend(op, opts)
1818
  return 0
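# Illustrative sketch, not part of the original module: the disk/NIC options
# arrive as (index, dict) pairs and are expanded above into dense lists before
# being put into the opcode; disk sizes end up in mebibytes after ParseUnit.
#
#   opts.disks = [("0", {"size": "10G"}), ("1", {"size": "512"})]
#   # becomes: disks = [{"size": 10240}, {"size": 512}]
#   opts.nics = [("0", {"mode": "bridged"})]
#   # becomes: nics = [{"mode": "bridged"}]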
1819

    
1820

    
1821
class _RunWhileClusterStoppedHelper:
1822
  """Helper class for L{RunWhileClusterStopped} to simplify state management
1823

1824
  """
1825
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
1826
    """Initializes this class.
1827

1828
    @type feedback_fn: callable
1829
    @param feedback_fn: Feedback function
1830
    @type cluster_name: string
1831
    @param cluster_name: Cluster name
1832
    @type master_node: string
1833
    @param master_node: Master node name
1834
    @type online_nodes: list
1835
    @param online_nodes: List of names of online nodes
1836

1837
    """
1838
    self.feedback_fn = feedback_fn
1839
    self.cluster_name = cluster_name
1840
    self.master_node = master_node
1841
    self.online_nodes = online_nodes
1842

    
1843
    self.ssh = ssh.SshRunner(self.cluster_name)
1844

    
1845
    self.nonmaster_nodes = [name for name in online_nodes
1846
                            if name != master_node]
1847

    
1848
    assert self.master_node not in self.nonmaster_nodes
1849

    
1850
  def _RunCmd(self, node_name, cmd):
1851
    """Runs a command on the local or a remote machine.
1852

1853
    @type node_name: string
1854
    @param node_name: Machine name
1855
    @type cmd: list
1856
    @param cmd: Command
1857

1858
    """
1859
    if node_name is None or node_name == self.master_node:
1860
      # No need to use SSH
1861
      result = utils.RunCmd(cmd)
1862
    else:
1863
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
1864

    
1865
    if result.failed:
1866
      errmsg = ["Failed to run command %s" % result.cmd]
1867
      if node_name:
1868
        errmsg.append("on node %s" % node_name)
1869
      errmsg.append(": exitcode %s and error %s" %
1870
                    (result.exit_code, result.output))
1871
      raise errors.OpExecError(" ".join(errmsg))
1872

    
1873
  def Call(self, fn, *args):
1874
    """Call function while all daemons are stopped.
1875

1876
    @type fn: callable
1877
    @param fn: Function to be called
1878

1879
    """
1880
    # Pause watcher by acquiring an exclusive lock on watcher state file
1881
    self.feedback_fn("Blocking watcher")
1882
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
1883
    try:
1884
      # TODO: Currently, this just blocks. There's no timeout.
1885
      # TODO: Should it be a shared lock?
1886
      watcher_block.Exclusive(blocking=True)
1887

    
1888
      # Stop master daemons, so that no new jobs can come in and all running
1889
      # ones are finished
1890
      self.feedback_fn("Stopping master daemons")
1891
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
1892
      try:
1893
        # Stop daemons on all nodes
1894
        for node_name in self.online_nodes:
1895
          self.feedback_fn("Stopping daemons on %s" % node_name)
1896
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
1897

    
1898
        # All daemons are shut down now
1899
        try:
1900
          return fn(self, *args)
1901
        except Exception, err:
1902
          _, errmsg = FormatError(err)
1903
          logging.exception("Caught exception")
1904
          self.feedback_fn(errmsg)
1905
          raise
1906
      finally:
1907
        # Start cluster again, master node last
1908
        for node_name in self.nonmaster_nodes + [self.master_node]:
1909
          self.feedback_fn("Starting daemons on %s" % node_name)
1910
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
1911
    finally:
1912
      # Resume watcher
1913
      watcher_block.Close()
1914

    
1915

    
1916
def RunWhileClusterStopped(feedback_fn, fn, *args):
1917
  """Calls a function while all cluster daemons are stopped.
1918

1919
  @type feedback_fn: callable
1920
  @param feedback_fn: Feedback function
1921
  @type fn: callable
1922
  @param fn: Function to be called when daemons are stopped
1923

1924
  """
1925
  feedback_fn("Gathering cluster information")
1926

    
1927
  # This ensures we're running on the master daemon
1928
  cl = GetClient()
1929

    
1930
  (cluster_name, master_node) = \
1931
    cl.QueryConfigValues(["cluster_name", "master_node"])
1932

    
1933
  online_nodes = GetOnlineNodes([], cl=cl)
1934

    
1935
  # Don't keep a reference to the client. The master daemon will go away.
1936
  del cl
1937

    
1938
  assert master_node in online_nodes
1939

    
1940
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
1941
                                       online_nodes).Call(fn, *args)
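# Illustrative sketch, not part of the original module: used for operations
# that must run with every Ganeti daemon stopped cluster-wide.  '_DoSomething'
# is a hypothetical callable; it receives the helper instance as its first
# argument and can use the helper's _RunCmd to run commands on online nodes.
#
#   def _DoSomething(runner):
#     runner._RunCmd(None, ["hostname"])
#
#   RunWhileClusterStopped(ToStdout, _DoSomething)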
1942

    
1943

    
1944
def GenerateTable(headers, fields, separator, data,
1945
                  numfields=None, unitfields=None,
1946
                  units=None):
1947
  """Prints a table with headers and different fields.
1948

1949
  @type headers: dict
1950
  @param headers: dictionary mapping field names to headers for
1951
      the table
1952
  @type fields: list
1953
  @param fields: the field names corresponding to each row in
1954
      the data field
1955
  @param separator: the separator to be used; if this is None,
1956
      the default 'smart' algorithm is used which computes optimal
1957
      field width, otherwise just the separator is used between
1958
      each field
1959
  @type data: list
1960
  @param data: a list of lists, each sublist being one row to be output
1961
  @type numfields: list
1962
  @param numfields: a list with the fields that hold numeric
1963
      values and thus should be right-aligned
1964
  @type unitfields: list
1965
  @param unitfields: a list with the fields that hold numeric
1966
      values that should be formatted with the units field
1967
  @type units: string or None
1968
  @param units: the units we should use for formatting, or None for
1969
      automatic choice (human-readable for non-separator usage, otherwise
1970
      megabytes); this is a one-letter string
1971

1972
  """
1973
  if units is None:
1974
    if separator:
1975
      units = "m"
1976
    else:
1977
      units = "h"
1978

    
1979
  if numfields is None:
1980
    numfields = []
1981
  if unitfields is None:
1982
    unitfields = []
1983

    
1984
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
1985
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
1986

    
1987
  format_fields = []
1988
  for field in fields:
1989
    if headers and field not in headers:
1990
      # TODO: handle better unknown fields (either revert to old
1991
      # style of raising exception, or deal more intelligently with
1992
      # variable fields)
1993
      headers[field] = field
1994
    if separator is not None:
1995
      format_fields.append("%s")
1996
    elif numfields.Matches(field):
1997
      format_fields.append("%*s")
1998
    else:
1999
      format_fields.append("%-*s")
2000

    
2001
  if separator is None:
2002
    mlens = [0 for name in fields]
2003
    format_str = ' '.join(format_fields)
2004
  else:
2005
    format_str = separator.replace("%", "%%").join(format_fields)
2006

    
2007
  for row in data:
2008
    if row is None:
2009
      continue
2010
    for idx, val in enumerate(row):
2011
      if unitfields.Matches(fields[idx]):
2012
        try:
2013
          val = int(val)
2014
        except (TypeError, ValueError):
2015
          pass
2016
        else:
2017
          val = row[idx] = utils.FormatUnit(val, units)
2018
      val = row[idx] = str(val)
2019
      if separator is None:
2020
        mlens[idx] = max(mlens[idx], len(val))
2021

    
2022
  result = []
2023
  if headers:
2024
    args = []
2025
    for idx, name in enumerate(fields):
2026
      hdr = headers[name]
2027
      if separator is None:
2028
        mlens[idx] = max(mlens[idx], len(hdr))
2029
        args.append(mlens[idx])
2030
      args.append(hdr)
2031
    result.append(format_str % tuple(args))
2032

    
2033
  if separator is None:
2034
    assert len(mlens) == len(fields)
2035

    
2036
    if fields and not numfields.Matches(fields[-1]):
2037
      mlens[-1] = 0
2038

    
2039
  for line in data:
2040
    args = []
2041
    if line is None:
2042
      line = ['-' for _ in fields]
2043
    for idx in range(len(fields)):
2044
      if separator is None:
2045
        args.append(mlens[idx])
2046
      args.append(line[idx])
2047
    result.append(format_str % tuple(args))
2048

    
2049
  return result
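# Illustrative sketch, not part of the original module: in the 'smart'
# (separator=None) mode the column widths are computed from the data and
# numeric fields are right-aligned.
#
#   lines = GenerateTable(headers={"name": "Node", "mfree": "MemFree"},
#                         fields=["name", "mfree"], separator=None,
#                         data=[["node1", 1024], ["node2", 512]],
#                         numfields=["mfree"])
#   # lines, one string per row, would look like:
#   #   Node  MemFree
#   #   node1    1024
#   #   node2     512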
2050

    
2051

    
2052
def FormatTimestamp(ts):
2053
  """Formats a given timestamp.
2054

2055
  @type ts: timestamp
2056
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2057

2058
  @rtype: string
2059
  @return: a string with the formatted timestamp
2060

2061
  """
2062
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2063
    return '?'
2064
  sec, usec = ts
2065
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
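# Illustrative sketch, not part of the original module: the input is the
# (seconds, microseconds) pair used in job timestamps; the date/time part
# follows the local timezone (UTC shown here).
#
#   FormatTimestamp((1234567890, 42))   # e.g. "2009-02-13 23:31:30.000042"
#   FormatTimestamp("bogus")            # "?"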
2066

    
2067

    
2068
def ParseTimespec(value):
2069
  """Parse a time specification.
2070

2071
  The following suffixes will be recognized:
2072

2073
    - s: seconds
2074
    - m: minutes
2075
    - h: hours
2076
    - d: days
2077
    - w: weeks
2078

2079
  Without any suffix, the value will be taken to be in seconds.
2080

2081
  """
2082
  value = str(value)
2083
  if not value:
2084
    raise errors.OpPrereqError("Empty time specification passed")
2085
  suffix_map = {
2086
    's': 1,
2087
    'm': 60,
2088
    'h': 3600,
2089
    'd': 86400,
2090
    'w': 604800,
2091
    }
2092
  if value[-1] not in suffix_map:
2093
    try:
2094
      value = int(value)
2095
    except (TypeError, ValueError):
2096
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2097
  else:
2098
    multiplier = suffix_map[value[-1]]
2099
    value = value[:-1]
2100
    if not value: # no data left after stripping the suffix
2101
      raise errors.OpPrereqError("Invalid time specification (only"
2102
                                 " suffix passed)")
2103
    try:
2104
      value = int(value) * multiplier
2105
    except (TypeError, ValueError):
2106
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2107
  return value
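# Illustrative sketch, not part of the original module:
#
#   ParseTimespec("30")   # => 30 (no suffix, taken as seconds)
#   ParseTimespec("2h")   # => 7200
#   ParseTimespec("1w")   # => 604800
#   ParseTimespec("")     # raises OpPrereqError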
2108

    
2109

    
2110
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2111
                   filter_master=False):
2112
  """Returns the names of online nodes.
2113

2114
  This function will also log a warning on stderr with the names of
2115
  the offline nodes that are skipped.
2116

2117
  @param nodes: if not empty, use only this subset of nodes (minus the
2118
      offline ones)
2119
  @param cl: if not None, luxi client to use
2120
  @type nowarn: boolean
2121
  @param nowarn: by default, this function will output a note with the
2122
      offline nodes that are skipped; if this parameter is True the
2123
      note is not displayed
2124
  @type secondary_ips: boolean
2125
  @param secondary_ips: if True, return the secondary IPs instead of the
2126
      names, useful for doing network traffic over the replication interface
2127
      (if any)
2128
  @type filter_master: boolean
2129
  @param filter_master: if True, do not return the master node in the list
2130
      (useful in coordination with secondary_ips where we cannot check our
2131
      node name against the list)
2132

2133
  """
2134
  if cl is None:
2135
    cl = GetClient()
2136

    
2137
  if secondary_ips:
2138
    name_idx = 2
2139
  else:
2140
    name_idx = 0
2141

    
2142
  if filter_master:
2143
    master_node = cl.QueryConfigValues(["master_node"])[0]
2144
    filter_fn = lambda x: x != master_node
2145
  else:
2146
    filter_fn = lambda _: True
2147

    
2148
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2149
                         use_locking=False)
2150
  offline = [row[0] for row in result if row[1]]
2151
  if offline and not nowarn:
2152
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2153
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
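# Illustrative sketch, not part of the original module: callers fetch either
# the names or the secondary IPs of the reachable nodes.
#
#   node_names = GetOnlineNodes([], cl=cl)
#   secondary_ips = GetOnlineNodes([], cl=cl, secondary_ips=True,
#                                  filter_master=True)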
2154

    
2155

    
2156
def _ToStream(stream, txt, *args):
2157
  """Write a message to a stream, bypassing the logging system
2158

2159
  @type stream: file object
2160
  @param stream: the file to which we should write
2161
  @type txt: str
2162
  @param txt: the message
2163

2164
  """
2165
  if args:
2166
    args = tuple(args)
2167
    stream.write(txt % args)
2168
  else:
2169
    stream.write(txt)
2170
  stream.write('\n')
2171
  stream.flush()
2172

    
2173

    
2174
def ToStdout(txt, *args):
2175
  """Write a message to stdout only, bypassing the logging system
2176

2177
  This is just a wrapper over _ToStream.
2178

2179
  @type txt: str
2180
  @param txt: the message
2181

2182
  """
2183
  _ToStream(sys.stdout, txt, *args)
2184

    
2185

    
2186
def ToStderr(txt, *args):
2187
  """Write a message to stderr only, bypassing the logging system
2188

2189
  This is just a wrapper over _ToStream.
2190

2191
  @type txt: str
2192
  @param txt: the message
2193

2194
  """
2195
  _ToStream(sys.stderr, txt, *args)
2196

    
2197

    
2198
class JobExecutor(object):
2199
  """Class which manages the submission and execution of multiple jobs.
2200

2201
  Note that instances of this class should not be reused between
2202
  GetResults() calls.
2203

2204
  """
2205
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2206
    self.queue = []
2207
    if cl is None:
2208
      cl = GetClient()
2209
    self.cl = cl
2210
    self.verbose = verbose
2211
    self.jobs = []
2212
    self.opts = opts
2213
    self.feedback_fn = feedback_fn
2214

    
2215
  def QueueJob(self, name, *ops):
2216
    """Record a job for later submit.
2217

2218
    @type name: string
2219
    @param name: a description of the job, used in the output messages
2220
    """
2221
    SetGenericOpcodeOpts(ops, self.opts)
2222
    self.queue.append((name, ops))
2223

    
2224
  def SubmitPending(self):
2225
    """Submit all pending jobs.
2226

2227
    """
2228
    results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2229
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2230
                                                            self.queue)):
2231
      self.jobs.append((idx, status, data, name))
2232

    
2233
  def _ChooseJob(self):
2234
    """Choose a non-waiting/queued job to poll next.
2235

2236
    """
2237
    assert self.jobs, "_ChooseJob called with empty job list"
2238

    
2239
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2240
    assert result
2241

    
2242
    for job_data, status in zip(self.jobs, result):
2243
      if status[0] in (constants.JOB_STATUS_QUEUED,
2244
                       constants.JOB_STATUS_WAITLOCK,
2245
                       constants.JOB_STATUS_CANCELING):
2246
        # job is still waiting
2247
        continue
2248
      # good candidate found
2249
      self.jobs.remove(job_data)
2250
      return job_data
2251

    
2252
    # no job found
2253
    return self.jobs.pop(0)
2254

    
2255
  def GetResults(self):
2256
    """Wait for and return the results of all jobs.
2257

2258
    @rtype: list
2259
    @return: list of tuples (success, job results), in the same order
2260
        as the submitted jobs; if a job has failed, instead of the result
2261
        there will be the error message
2262

2263
    """
2264
    if not self.jobs:
2265
      self.SubmitPending()
2266
    results = []
2267
    if self.verbose:
2268
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2269
      if ok_jobs:
2270
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2271

    
2272
    # first, remove any non-submitted jobs
2273
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2274
    for idx, _, jid, name in failures:
2275
      ToStderr("Failed to submit job for %s: %s", name, jid)
2276
      results.append((idx, False, jid))
2277

    
2278
    while self.jobs:
2279
      (idx, _, jid, name) = self._ChooseJob()
2280
      ToStdout("Waiting for job %s for %s...", jid, name)
2281
      try:
2282
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2283
        success = True
2284
      except (errors.GenericError, luxi.ProtocolError), err:
2285
        _, job_result = FormatError(err)
2286
        success = False
2287
        # the error message will always be shown, verbose or not
2288
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2289

    
2290
      results.append((idx, success, job_result))
2291

    
2292
    # sort based on the index, then drop it
2293
    results.sort()
2294
    results = [i[1:] for i in results]
2295

    
2296
    return results
2297

    
2298
  def WaitOrShow(self, wait):
2299
    """Wait for job results or only print the job IDs.
2300

2301
    @type wait: boolean
2302
    @param wait: whether to wait or not
2303

2304
    """
2305
    if wait:
2306
      return self.GetResults()
2307
    else:
2308
      if not self.jobs:
2309
        self.SubmitPending()
2310
      for _, status, result, name in self.jobs:
2311
        if status:
2312
          ToStdout("%s: %s", result, name)
2313
        else:
2314
          ToStderr("Failure for %s: %s", name, result)