# lib/cli.py @ f91e255a
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
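
# Illustrative note (not part of the original module): a command's argument
# definition is a list of _Argument instances, checked by _CheckArguments
# below. For example, a hypothetical "rename" command taking exactly one
# instance name plus one new name could use
#
#   [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)]
#
# while ARGS_MANY_NODES above accepts any number of node names
# (min=0, max=None).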


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
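
# Example (illustrative only, assuming utils.ParseUnit's usual MiB-based
# behaviour): a plain number is taken as MiB, while suffixes scale it, so for
# an option of type "unit":
#
#   check_unit(opt_obj, "--os-size", "512")  => 512
#   check_unit(opt_obj, "--os-size", "4g")   => 4096
#
# An unparseable value such as "12x" raises OptionValueError instead.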


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
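
# Example (illustrative only): parsing a typical hypervisor parameter string
# with _SplitKeyVal shows all three empty-value conventions at once:
#
#   _SplitKeyVal("-H", "kernel_path=/boot/vmlinuz,no_acpi,-root_path")
#
# returns a dict mapping "kernel_path" to "/boot/vmlinuz", "acpi" to False
# (the "no_" prefix is stripped) and "root_path" to None (the "-" prefix is
# stripped as well).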


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
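
# Example (illustrative only): values of "ident:key=val,..." options such as
# --net or --disk are parsed into (ident, dict) pairs, e.g.
#
#   check_ident_key_val(opt_obj, "--net", "0:ip=192.168.1.10,mode=bridged")
#     => ("0", {"ip": "192.168.1.10", "mode": "bridged"})
#   check_ident_key_val(opt_obj, "--net", "no_0")
#     => ("0", False)
#
# With action="append", repeated uses accumulate a list of such pairs.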


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
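
# Example (illustrative only): options declared with type="bool" accept the
# usual yes/no spellings, case-insensitively (constants.VALUE_TRUE and
# constants.VALUE_FALSE plus "yes"/"no"):
#
#   check_bool(opt_obj, "--offline", "Yes")   => True
#   check_bool(opt_obj, "--offline", "no")    => False
#   check_bool(opt_obj, "--offline", "maybe") raises errors.ParameterError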


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
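
# Example (illustrative only, not an option used by the original module):
# the custom types registered above are what make definitions such as
#
#   EXAMPLE_OPT = cli_option("--example", dest="example", type="keyval",
#                            default={}, help="Example key=val option")
#
# possible; optparse then runs check_key_val on the raw string, so
# "--example a=1,no_b" ends up as {"a": "1", "b": False} in options.example.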


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the MAC prefix for the instance"
                            " NICs, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
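
# Illustrative sketch (not part of the original module): each value in the
# "commands" dictionary passed to _ParseArgs is a 5-element tuple of
# (function, argument definition, option list, usage string, description),
# e.g. a hypothetical client could declare:
#
#   commands = {
#     "list": (PrintList, ARGS_MANY_INSTANCES, [NOHDR_OPT, SEP_OPT],
#              "[<instance>...]", "Lists the instances"),
#     }
#   aliases = {"show": "list"}
#
# _ParseArgs would then return (PrintList, options, args) for both
# "prog list" and "prog show".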


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
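
# Example (illustrative only): with args_def=[ArgNode(min=1, max=None)] the
# check accepts one or more node names, so
#
#   _CheckArguments("power-off", [ArgNode(min=1, max=None)], [])
#
# prints "Error: Command power-off expects at least 1 argument(s)" and
# returns False, while any non-empty argument list returns True.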


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
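
# Example (illustrative only):
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#     => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     => ("node1.example.com", None)
#
# Note that the first form returns a list and the second a tuple; callers
# typically just unpack two values either way.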


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
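
# Example (illustrative only):
#
#   CalculateOSNames("debootstrap", ["squeeze", "wheezy"])
#     => ["debootstrap+squeeze", "debootstrap+wheezy"]
#   CalculateOSNames("debootstrap", None)
#     => ["debootstrap"]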


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected fields
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
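
# Example (illustrative only): a leading "+" extends the default field list
# instead of replacing it:
#
#   ParseFields(None, ["name", "os"])              => ["name", "os"]
#   ParseFields("+disk_usage", ["name", "os"])     => ["name", "os", "disk_usage"]
#   ParseFields("name,disk_usage", ["name", "os"]) => ["name", "disk_usage"]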


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
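
# Example (illustrative only): a caller asking a three-way question could use
#
#   AskUser("Really proceed?",
#           choices=[("y", True, "Proceed"),
#                    ("n", False, "Abort"),
#                    ("a", "all", "Proceed for all remaining items")])
#
# On a tty the user is prompted with "y/n/[a]/?" ("?" prints the help text);
# without a tty the last entry's return value ("all" here) is returned.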
1355

    
1356

    
1357
class JobSubmittedException(Exception):
1358
  """Job was submitted, client should exit.
1359

1360
  This exception has one argument, the ID of the job that was
1361
  submitted. The handler should print this ID.
1362

1363
  This is not an error, just a structured way to exit from clients.
1364

1365
  """
1366

    
1367

    
1368
def SendJob(ops, cl=None):
1369
  """Function to submit an opcode without waiting for the results.
1370

1371
  @type ops: list
1372
  @param ops: list of opcodes
1373
  @type cl: luxi.Client
1374
  @param cl: the luxi client to use for communicating with the master;
1375
             if None, a new client will be created
1376

1377
  """
1378
  if cl is None:
1379
    cl = GetClient()
1380

    
1381
  job_id = cl.SubmitJob(ops)
1382

    
1383
  return job_id
1384

    
1385

    
1386
def GenericPollJob(job_id, cbs, report_cbs):
1387
  """Generic job-polling function.
1388

1389
  @type job_id: number
1390
  @param job_id: Job ID
1391
  @type cbs: Instance of L{JobPollCbBase}
1392
  @param cbs: Data callbacks
1393
  @type report_cbs: Instance of L{JobPollReportCbBase}
1394
  @param report_cbs: Reporting callbacks
1395

1396
  """
1397
  prev_job_info = None
1398
  prev_logmsg_serial = None
1399

    
1400
  status = None
1401

    
1402
  while True:
1403
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1404
                                      prev_logmsg_serial)
1405
    if not result:
1406
      # job not found, go away!
1407
      raise errors.JobLost("Job with id %s lost" % job_id)
1408

    
1409
    if result == constants.JOB_NOTCHANGED:
1410
      report_cbs.ReportNotChanged(job_id, status)
1411

    
1412
      # Wait again
1413
      continue
1414

    
1415
    # Split result, a tuple of (field values, log entries)
1416
    (job_info, log_entries) = result
1417
    (status, ) = job_info
1418

    
1419
    if log_entries:
1420
      for log_entry in log_entries:
1421
        (serial, timestamp, log_type, message) = log_entry
1422
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1423
                                    log_type, message)
1424
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1425

    
1426
    # TODO: Handle canceled and archived jobs
1427
    elif status in (constants.JOB_STATUS_SUCCESS,
1428
                    constants.JOB_STATUS_ERROR,
1429
                    constants.JOB_STATUS_CANCELING,
1430
                    constants.JOB_STATUS_CANCELED):
1431
      break
1432

    
1433
    prev_job_info = job_info
1434

    
1435
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1436
  if not jobs:
1437
    raise errors.JobLost("Job with id %s lost" % job_id)
1438

    
1439
  status, opstatus, result = jobs[0]
1440

    
1441
  if status == constants.JOB_STATUS_SUCCESS:
1442
    return result
1443

    
1444
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1445
    raise errors.OpExecError("Job was canceled")
1446

    
1447
  has_ok = False
1448
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1449
    if status == constants.OP_STATUS_SUCCESS:
1450
      has_ok = True
1451
    elif status == constants.OP_STATUS_ERROR:
1452
      errors.MaybeRaise(msg)
1453

    
1454
      if has_ok:
1455
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1456
                                 (idx, msg))
1457

    
1458
      raise errors.OpExecError(str(msg))
1459

    
1460
  # default failure mode
1461
  raise errors.OpExecError(result)
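
# Illustrative sketch: GenericPollJob is normally driven through the LUXI
# callback classes defined below (this is what PollJob does); "cl" is assumed
# to be a luxi.Client and "job_id" an already-submitted job ID.
#
#   result = GenericPollJob(job_id, _LuxiJobPollCb(cl),
#                           StdioJobPollReportCb())
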
class JobPollCbBase:
1465
  """Base class for L{GenericPollJob} callbacks.
1466

1467
  """
1468
  def __init__(self):
1469
    """Initializes this class.
1470

1471
    """
1472

    
1473
  def WaitForJobChangeOnce(self, job_id, fields,
1474
                           prev_job_info, prev_log_serial):
1475
    """Waits for changes on a job.
1476

1477
    """
1478
    raise NotImplementedError()
1479

    
1480
  def QueryJobs(self, job_ids, fields):
1481
    """Returns the selected fields for the selected job IDs.
1482

1483
    @type job_ids: list of numbers
1484
    @param job_ids: Job IDs
1485
    @type fields: list of strings
1486
    @param fields: Fields
1487

1488
    """
1489
    raise NotImplementedError()
1490

    
1491

    
1492
class JobPollReportCbBase:
1493
  """Base class for L{GenericPollJob} reporting callbacks.
1494

1495
  """
1496
  def __init__(self):
1497
    """Initializes this class.
1498

1499
    """
1500

    
1501
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1502
    """Handles a log message.
1503

1504
    """
1505
    raise NotImplementedError()
1506

    
1507
  def ReportNotChanged(self, job_id, status):
1508
    """Called for if a job hasn't changed in a while.
1509

1510
    @type job_id: number
1511
    @param job_id: Job ID
1512
    @type status: string or None
1513
    @param status: Job status if available
1514

1515
    """
1516
    raise NotImplementedError()
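
# Illustrative sketch (hypothetical subclass): a reporting callback that
# collects formatted log messages in a list instead of printing them.
#
#   class _ListJobPollReportCb(JobPollReportCbBase):
#     def __init__(self):
#       JobPollReportCbBase.__init__(self)
#       self.messages = []
#
#     def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
#       self.messages.append(FormatLogMessage(log_type, log_msg))
#
#     def ReportNotChanged(self, job_id, status):
#       pass
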
class _LuxiJobPollCb(JobPollCbBase):
1520
  def __init__(self, cl):
1521
    """Initializes this class.
1522

1523
    """
1524
    JobPollCbBase.__init__(self)
1525
    self.cl = cl
1526

    
1527
  def WaitForJobChangeOnce(self, job_id, fields,
1528
                           prev_job_info, prev_log_serial):
1529
    """Waits for changes on a job.
1530

1531
    """
1532
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1533
                                        prev_job_info, prev_log_serial)
1534

    
1535
  def QueryJobs(self, job_ids, fields):
1536
    """Returns the selected fields for the selected job IDs.
1537

1538
    """
1539
    return self.cl.QueryJobs(job_ids, fields)
1540

    
1541

    
1542
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1543
  def __init__(self, feedback_fn):
1544
    """Initializes this class.
1545

1546
    """
1547
    JobPollReportCbBase.__init__(self)
1548

    
1549
    self.feedback_fn = feedback_fn
1550

    
1551
    assert callable(feedback_fn)
1552

    
1553
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1554
    """Handles a log message.
1555

1556
    """
1557
    self.feedback_fn((timestamp, log_type, log_msg))
1558

    
1559
  def ReportNotChanged(self, job_id, status):
1560
    """Called if a job hasn't changed in a while.
1561

1562
    """
1563
    # Ignore
1564

    
1565

    
1566
class StdioJobPollReportCb(JobPollReportCbBase):
1567
  def __init__(self):
1568
    """Initializes this class.
1569

1570
    """
1571
    JobPollReportCbBase.__init__(self)
1572

    
1573
    self.notified_queued = False
1574
    self.notified_waitlock = False
1575

    
1576
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1577
    """Handles a log message.
1578

1579
    """
1580
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1581
             FormatLogMessage(log_type, log_msg))
1582

    
1583
  def ReportNotChanged(self, job_id, status):
1584
    """Called if a job hasn't changed in a while.
1585

1586
    """
1587
    if status is None:
1588
      return
1589

    
1590
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1591
      ToStderr("Job %s is waiting in queue", job_id)
1592
      self.notified_queued = True
1593

    
1594
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1595
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1596
      self.notified_waitlock = True
1597

    
1598

    
1599
def FormatLogMessage(log_type, log_msg):
1600
  """Formats a job message according to its type.
1601

1602
  """
1603
  if log_type != constants.ELOG_MESSAGE:
1604
    log_msg = str(log_msg)
1605

    
1606
  return utils.SafeEncode(log_msg)
1607

    
1608

    
1609
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1610
  """Function to poll for the result of a job.
1611

1612
  @type job_id: job identifier
1613
  @param job_id: the job to poll for results
1614
  @type cl: luxi.Client
1615
  @param cl: the luxi client to use for communicating with the master;
1616
             if None, a new client will be created
1617

1618
  """
1619
  if cl is None:
1620
    cl = GetClient()
1621

    
1622
  if reporter is None:
1623
    if feedback_fn:
1624
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1625
    else:
1626
      reporter = StdioJobPollReportCb()
1627
  elif feedback_fn:
1628
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1629

    
1630
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
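
# Illustrative sketch: combining SendJob and PollJob by hand; SubmitOpCode
# below wraps exactly this pattern. "op" is a placeholder opcode instance.
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)
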
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1634
  """Legacy function to submit an opcode.
1635

1636
  This is just a simple wrapper over the construction of the processor
1637
  instance. It should be extended to better handle feedback and
1638
  interaction functions.
1639

1640
  """
1641
  if cl is None:
1642
    cl = GetClient()
1643

    
1644
  SetGenericOpcodeOpts([op], opts)
1645

    
1646
  job_id = SendJob([op], cl=cl)
1647

    
1648
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1649
                       reporter=reporter)
1650

    
1651
  return op_results[0]
1652

    
1653

    
1654
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1655
  """Wrapper around SubmitOpCode or SendJob.
1656

1657
  This function will decide, based on the 'opts' parameter, whether to
1658
  submit and wait for the result of the opcode (and return it), or
1659
  whether to just send the job and print its identifier. It is used in
1660
  order to simplify the implementation of the '--submit' option.
1661

1662
  It will also process the opcodes if we're sending them via SendJob
1663
  (otherwise SubmitOpCode does it).
1664

1665
  """
1666
  if opts and opts.submit_only:
1667
    job = [op]
1668
    SetGenericOpcodeOpts(job, opts)
1669
    job_id = SendJob(job, cl=cl)
1670
    raise JobSubmittedException(job_id)
1671
  else:
1672
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
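
# Illustrative sketch (hypothetical command function): gnt-* commands call
# SubmitOrSend so that the "--submit" option is honoured transparently; the
# opcode arguments are placeholders.
#
#   def ExampleCommand(opts, args):
#     op = opcodes.OpCreateInstance(...)  # placeholder arguments
#     SubmitOrSend(op, opts)
#     return 0
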
def SetGenericOpcodeOpts(opcode_list, options):
1676
  """Processor for generic options.
1677

1678
  This function updates the given opcodes based on generic command
1679
  line options (like debug, dry-run, etc.).
1680

1681
  @param opcode_list: list of opcodes
1682
  @param options: command line options or None
1683
  @return: None (in-place modification)
1684

1685
  """
1686
  if not options:
1687
    return
1688
  for op in opcode_list:
1689
    op.debug_level = options.debug
1690
    if hasattr(options, "dry_run"):
1691
      op.dry_run = options.dry_run
1692
    if getattr(options, "priority", None) is not None:
1693
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1694

    
1695

    
1696
def GetClient():
1697
  # TODO: Cache object?
1698
  try:
1699
    client = luxi.Client()
1700
  except luxi.NoMasterError:
1701
    ss = ssconf.SimpleStore()
1702

    
1703
    # Try to read ssconf file
1704
    try:
1705
      ss.GetMasterNode()
1706
    except errors.ConfigurationError:
1707
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1708
                                 " not part of a cluster")
1709

    
1710
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1711
    if master != myself:
1712
      raise errors.OpPrereqError("This is not the master node, please connect"
1713
                                 " to node '%s' and rerun the command" %
1714
                                 master)
1715
    raise
1716
  return client
1717

    
1718

    
1719
def FormatError(err):
1720
  """Return a formatted error message for a given error.
1721

1722
  This function takes an exception instance and returns a tuple
1723
  consisting of two values: first, the recommended exit code, and
1724
  second, a string describing the error message (not
1725
  newline-terminated).
1726

1727
  """
1728
  retcode = 1
1729
  obuf = StringIO()
1730
  msg = str(err)
1731
  if isinstance(err, errors.ConfigurationError):
1732
    txt = "Corrupt configuration file: %s" % msg
1733
    logging.error(txt)
1734
    obuf.write(txt + "\n")
1735
    obuf.write("Aborting.")
1736
    retcode = 2
1737
  elif isinstance(err, errors.HooksAbort):
1738
    obuf.write("Failure: hooks execution failed:\n")
1739
    for node, script, out in err.args[0]:
1740
      if out:
1741
        obuf.write("  node: %s, script: %s, output: %s\n" %
1742
                   (node, script, out))
1743
      else:
1744
        obuf.write("  node: %s, script: %s (no output)\n" %
1745
                   (node, script))
1746
  elif isinstance(err, errors.HooksFailure):
1747
    obuf.write("Failure: hooks general failure: %s" % msg)
1748
  elif isinstance(err, errors.ResolverError):
1749
    this_host = netutils.Hostname.GetSysName()
1750
    if err.args[0] == this_host:
1751
      msg = "Failure: can't resolve my own hostname ('%s')"
1752
    else:
1753
      msg = "Failure: can't resolve hostname '%s'"
1754
    obuf.write(msg % err.args[0])
1755
  elif isinstance(err, errors.OpPrereqError):
1756
    if len(err.args) == 2:
1757
      obuf.write("Failure: prerequisites not met for this"
1758
               " operation:\nerror type: %s, error details:\n%s" %
1759
                 (err.args[1], err.args[0]))
1760
    else:
1761
      obuf.write("Failure: prerequisites not met for this"
1762
                 " operation:\n%s" % msg)
1763
  elif isinstance(err, errors.OpExecError):
1764
    obuf.write("Failure: command execution error:\n%s" % msg)
1765
  elif isinstance(err, errors.TagError):
1766
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1767
  elif isinstance(err, errors.JobQueueDrainError):
1768
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1769
               " accept new requests\n")
1770
  elif isinstance(err, errors.JobQueueFull):
1771
    obuf.write("Failure: the job queue is full and doesn't accept new"
1772
               " job submissions until old jobs are archived\n")
1773
  elif isinstance(err, errors.TypeEnforcementError):
1774
    obuf.write("Parameter Error: %s" % msg)
1775
  elif isinstance(err, errors.ParameterError):
1776
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1777
  elif isinstance(err, luxi.NoMasterError):
1778
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1779
               " and listening for connections?")
1780
  elif isinstance(err, luxi.TimeoutError):
1781
    obuf.write("Timeout while talking to the master daemon. Error:\n"
1782
               "%s" % msg)
1783
  elif isinstance(err, luxi.PermissionError):
1784
    obuf.write("It seems you don't have permissions to connect to the"
1785
               " master daemon.\nPlease retry as a different user.")
1786
  elif isinstance(err, luxi.ProtocolError):
1787
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1788
               "%s" % msg)
1789
  elif isinstance(err, errors.JobLost):
1790
    obuf.write("Error checking job status: %s" % msg)
1791
  elif isinstance(err, errors.GenericError):
1792
    obuf.write("Unhandled Ganeti error: %s" % msg)
1793
  elif isinstance(err, JobSubmittedException):
1794
    obuf.write("JobID: %s\n" % err.args[0])
1795
    retcode = 0
1796
  else:
1797
    obuf.write("Unhandled exception: %s" % msg)
1798
  return retcode, obuf.getvalue().rstrip('\n')
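
# Illustrative sketch: turning a caught Ganeti exception into an exit code
# and a printable message (the same pattern GenericMain uses below);
# "func", "options" and "args" are placeholders.
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     result, err_msg = FormatError(err)
#     ToStderr(err_msg)
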
def GenericMain(commands, override=None, aliases=None):
1802
  """Generic main function for all the gnt-* commands.
1803

1804
  Arguments:
1805
    - commands: a dictionary with a special structure, see the design doc
1806
                for command line handling.
1807
    - override: if not None, we expect a dictionary with keys that will
1808
                override command line options; this can be used to pass
1809
                options from the scripts to generic functions
1810
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1811

1812
  """
1813
  # save the program name and the entire command line for later logging
1814
  if sys.argv:
1815
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1816
    if len(sys.argv) >= 2:
1817
      binary += " " + sys.argv[1]
1818
      old_cmdline = " ".join(sys.argv[2:])
1819
    else:
1820
      old_cmdline = ""
1821
  else:
1822
    binary = "<unknown program>"
1823
    old_cmdline = ""
1824

    
1825
  if aliases is None:
1826
    aliases = {}
1827

    
1828
  try:
1829
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1830
  except errors.ParameterError, err:
1831
    result, err_msg = FormatError(err)
1832
    ToStderr(err_msg)
1833
    return 1
1834

    
1835
  if func is None: # parse error
1836
    return 1
1837

    
1838
  if override is not None:
1839
    for key, val in override.iteritems():
1840
      setattr(options, key, val)
1841

    
1842
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1843
                     stderr_logging=True, program=binary)
1844

    
1845
  if old_cmdline:
1846
    logging.info("run with arguments '%s'", old_cmdline)
1847
  else:
1848
    logging.info("run with no arguments")
1849

    
1850
  try:
1851
    result = func(options, args)
1852
  except (errors.GenericError, luxi.ProtocolError,
1853
          JobSubmittedException), err:
1854
    result, err_msg = FormatError(err)
1855
    logging.exception("Error during command processing")
1856
    ToStderr(err_msg)
1857

    
1858
  return result
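
# Illustrative sketch (hypothetical gnt-* script): the entry point pattern
# around GenericMain; the "commands" dictionary entry is schematic and its
# exact tuple layout is described in the command line handling design doc.
#
#   commands = {
#     "example": (ExampleCommand, ARGS_NONE, [DEBUG_OPT],
#                 "", "Run the example command"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands, aliases={"ex": "example"}))
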
def ParseNicOption(optvalue):
1862
  """Parses the value of the --net option(s).
1863

1864
  """
1865
  try:
1866
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1867
  except (TypeError, ValueError), err:
1868
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1869

    
1870
  nics = [{}] * nic_max
1871
  for nidx, ndict in optvalue:
1872
    nidx = int(nidx)
1873

    
1874
    if not isinstance(ndict, dict):
1875
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1876
                                 " got %s" % (nidx, ndict))
1877

    
1878
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1879

    
1880
    nics[nidx] = ndict
1881

    
1882
  return nics
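
# Illustrative sketch: ParseNicOption turns the parsed "--net" value, a list
# of (index, dict) pairs, into a dense list of NIC dicts; the parameter names
# below ("link", "mode") are just examples.
#
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   # -> [{"link": "br0"}, {}, {"mode": "routed"}]
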
def GenericInstanceCreate(mode, opts, args):
1886
  """Add an instance to the cluster via either creation or import.
1887

1888
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1889
  @param opts: the command line options selected by the user
1890
  @type args: list
1891
  @param args: should contain only one element, the new instance name
1892
  @rtype: int
1893
  @return: the desired exit code
1894

1895
  """
1896
  instance = args[0]
1897

    
1898
  (pnode, snode) = SplitNodeOption(opts.node)
1899

    
1900
  hypervisor = None
1901
  hvparams = {}
1902
  if opts.hypervisor:
1903
    hypervisor, hvparams = opts.hypervisor
1904

    
1905
  if opts.nics:
1906
    nics = ParseNicOption(opts.nics)
1907
  elif opts.no_nics:
1908
    # no nics
1909
    nics = []
1910
  elif mode == constants.INSTANCE_CREATE:
1911
    # default of one nic, all auto
1912
    nics = [{}]
1913
  else:
1914
    # mode == import
1915
    nics = []
1916

    
1917
  if opts.disk_template == constants.DT_DISKLESS:
1918
    if opts.disks or opts.sd_size is not None:
1919
      raise errors.OpPrereqError("Diskless instance but disk"
1920
                                 " information passed")
1921
    disks = []
1922
  else:
1923
    if (not opts.disks and not opts.sd_size
1924
        and mode == constants.INSTANCE_CREATE):
1925
      raise errors.OpPrereqError("No disk information specified")
1926
    if opts.disks and opts.sd_size is not None:
1927
      raise errors.OpPrereqError("Please use either the '--disk' or"
1928
                                 " '-s' option")
1929
    if opts.sd_size is not None:
1930
      opts.disks = [(0, {"size": opts.sd_size})]
1931

    
1932
    if opts.disks:
1933
      try:
1934
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1935
      except ValueError, err:
1936
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
1937
      disks = [{}] * disk_max
1938
    else:
1939
      disks = []
1940
    for didx, ddict in opts.disks:
1941
      didx = int(didx)
1942
      if not isinstance(ddict, dict):
1943
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1944
        raise errors.OpPrereqError(msg)
1945
      elif "size" in ddict:
1946
        if "adopt" in ddict:
1947
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
1948
                                     " (disk %d)" % didx)
1949
        try:
1950
          ddict["size"] = utils.ParseUnit(ddict["size"])
1951
        except ValueError, err:
1952
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
1953
                                     (didx, err))
1954
      elif "adopt" in ddict:
1955
        if mode == constants.INSTANCE_IMPORT:
1956
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
1957
                                     " import")
1958
        ddict["size"] = 0
1959
      else:
1960
        raise errors.OpPrereqError("Missing size or adoption source for"
1961
                                   " disk %d" % didx)
1962
      disks[didx] = ddict
1963

    
1964
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
1965
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
1966

    
1967
  if mode == constants.INSTANCE_CREATE:
1968
    start = opts.start
1969
    os_type = opts.os
1970
    force_variant = opts.force_variant
1971
    src_node = None
1972
    src_path = None
1973
    no_install = opts.no_install
1974
    identify_defaults = False
1975
  elif mode == constants.INSTANCE_IMPORT:
1976
    start = False
1977
    os_type = None
1978
    force_variant = False
1979
    src_node = opts.src_node
1980
    src_path = opts.src_dir
1981
    no_install = None
1982
    identify_defaults = opts.identify_defaults
1983
  else:
1984
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
1985

    
1986
  op = opcodes.OpCreateInstance(instance_name=instance,
1987
                                disks=disks,
1988
                                disk_template=opts.disk_template,
1989
                                nics=nics,
1990
                                pnode=pnode, snode=snode,
1991
                                ip_check=opts.ip_check,
1992
                                name_check=opts.name_check,
1993
                                wait_for_sync=opts.wait_for_sync,
1994
                                file_storage_dir=opts.file_storage_dir,
1995
                                file_driver=opts.file_driver,
1996
                                iallocator=opts.iallocator,
1997
                                hypervisor=hypervisor,
1998
                                hvparams=hvparams,
1999
                                beparams=opts.beparams,
2000
                                osparams=opts.osparams,
2001
                                mode=mode,
2002
                                start=start,
2003
                                os_type=os_type,
2004
                                force_variant=force_variant,
2005
                                src_node=src_node,
2006
                                src_path=src_path,
2007
                                no_install=no_install,
2008
                                identify_defaults=identify_defaults)
2009

    
2010
  SubmitOrSend(op, opts)
2011
  return 0
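
# Illustrative sketch: gnt-instance style scripts wrap this helper in their
# command functions, e.g. (hypothetical wrapper):
#
#   def AddInstance(opts, args):
#     return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)
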
class _RunWhileClusterStoppedHelper:
2015
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2016

2017
  """
2018
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2019
    """Initializes this class.
2020

2021
    @type feedback_fn: callable
2022
    @param feedback_fn: Feedback function
2023
    @type cluster_name: string
2024
    @param cluster_name: Cluster name
2025
    @type master_node: string
2026
    @param master_node: Master node name
2027
    @type online_nodes: list
2028
    @param online_nodes: List of names of online nodes
2029

2030
    """
2031
    self.feedback_fn = feedback_fn
2032
    self.cluster_name = cluster_name
2033
    self.master_node = master_node
2034
    self.online_nodes = online_nodes
2035

    
2036
    self.ssh = ssh.SshRunner(self.cluster_name)
2037

    
2038
    self.nonmaster_nodes = [name for name in online_nodes
2039
                            if name != master_node]
2040

    
2041
    assert self.master_node not in self.nonmaster_nodes
2042

    
2043
  def _RunCmd(self, node_name, cmd):
2044
    """Runs a command on the local or a remote machine.
2045

2046
    @type node_name: string
2047
    @param node_name: Machine name
2048
    @type cmd: list
2049
    @param cmd: Command
2050

2051
    """
2052
    if node_name is None or node_name == self.master_node:
2053
      # No need to use SSH
2054
      result = utils.RunCmd(cmd)
2055
    else:
2056
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2057

    
2058
    if result.failed:
2059
      errmsg = ["Failed to run command %s" % result.cmd]
2060
      if node_name:
2061
        errmsg.append("on node %s" % node_name)
2062
      errmsg.append(": exitcode %s and error %s" %
2063
                    (result.exit_code, result.output))
2064
      raise errors.OpExecError(" ".join(errmsg))
2065

    
2066
  def Call(self, fn, *args):
2067
    """Call function while all daemons are stopped.
2068

2069
    @type fn: callable
2070
    @param fn: Function to be called
2071

2072
    """
2073
    # Pause watcher by acquiring an exclusive lock on watcher state file
2074
    self.feedback_fn("Blocking watcher")
2075
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2076
    try:
2077
      # TODO: Currently, this just blocks. There's no timeout.
2078
      # TODO: Should it be a shared lock?
2079
      watcher_block.Exclusive(blocking=True)
2080

    
2081
      # Stop master daemons, so that no new jobs can come in and all running
2082
      # ones are finished
2083
      self.feedback_fn("Stopping master daemons")
2084
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2085
      try:
2086
        # Stop daemons on all nodes
2087
        for node_name in self.online_nodes:
2088
          self.feedback_fn("Stopping daemons on %s" % node_name)
2089
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2090

    
2091
        # All daemons are shut down now
2092
        try:
2093
          return fn(self, *args)
2094
        except Exception, err:
2095
          _, errmsg = FormatError(err)
2096
          logging.exception("Caught exception")
2097
          self.feedback_fn(errmsg)
2098
          raise
2099
      finally:
2100
        # Start cluster again, master node last
2101
        for node_name in self.nonmaster_nodes + [self.master_node]:
2102
          self.feedback_fn("Starting daemons on %s" % node_name)
2103
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2104
    finally:
2105
      # Resume watcher
2106
      watcher_block.Close()
2107

    
2108

    
2109
def RunWhileClusterStopped(feedback_fn, fn, *args):
2110
  """Calls a function while all cluster daemons are stopped.
2111

2112
  @type feedback_fn: callable
2113
  @param feedback_fn: Feedback function
2114
  @type fn: callable
2115
  @param fn: Function to be called when daemons are stopped
2116

2117
  """
2118
  feedback_fn("Gathering cluster information")
2119

    
2120
  # This ensures we're running on the master daemon
2121
  cl = GetClient()
2122

    
2123
  (cluster_name, master_node) = \
2124
    cl.QueryConfigValues(["cluster_name", "master_node"])
2125

    
2126
  online_nodes = GetOnlineNodes([], cl=cl)
2127

    
2128
  # Don't keep a reference to the client. The master daemon will go away.
2129
  del cl
2130

    
2131
  assert master_node in online_nodes
2132

    
2133
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2134
                                       online_nodes).Call(fn, *args)
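
# Illustrative sketch (hypothetical callback): the called function receives
# the helper instance as its first argument and can use its attributes.
#
#   def _WhileStopped(helper):
#     helper.feedback_fn("Daemons stopped, master is %s" % helper.master_node)
#
#   RunWhileClusterStopped(ToStdout, _WhileStopped)
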
def GenerateTable(headers, fields, separator, data,
2138
                  numfields=None, unitfields=None,
2139
                  units=None):
2140
  """Prints a table with headers and different fields.
2141

2142
  @type headers: dict
2143
  @param headers: dictionary mapping field names to headers for
2144
      the table
2145
  @type fields: list
2146
  @param fields: the field names corresponding to each row in
2147
      the data field
2148
  @param separator: the separator to be used; if this is None,
2149
      the default 'smart' algorithm is used which computes optimal
2150
      field width, otherwise just the separator is used between
2151
      each field
2152
  @type data: list
2153
  @param data: a list of lists, each sublist being one row to be output
2154
  @type numfields: list
2155
  @param numfields: a list with the fields that hold numeric
2156
      values and thus should be right-aligned
2157
  @type unitfields: list
2158
  @param unitfields: a list with the fields that hold numeric
2159
      values that should be formatted with the units field
2160
  @type units: string or None
2161
  @param units: the units we should use for formatting, or None for
2162
      automatic choice (human-readable for non-separator usage, otherwise
2163
      megabytes); this is a one-letter string
2164

2165
  """
2166
  if units is None:
2167
    if separator:
2168
      units = "m"
2169
    else:
2170
      units = "h"
2171

    
2172
  if numfields is None:
2173
    numfields = []
2174
  if unitfields is None:
2175
    unitfields = []
2176

    
2177
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2178
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2179

    
2180
  format_fields = []
2181
  for field in fields:
2182
    if headers and field not in headers:
2183
      # TODO: handle better unknown fields (either revert to old
2184
      # style of raising exception, or deal more intelligently with
2185
      # variable fields)
2186
      headers[field] = field
2187
    if separator is not None:
2188
      format_fields.append("%s")
2189
    elif numfields.Matches(field):
2190
      format_fields.append("%*s")
2191
    else:
2192
      format_fields.append("%-*s")
2193

    
2194
  if separator is None:
2195
    mlens = [0 for name in fields]
2196
    format_str = ' '.join(format_fields)
2197
  else:
2198
    format_str = separator.replace("%", "%%").join(format_fields)
2199

    
2200
  for row in data:
2201
    if row is None:
2202
      continue
2203
    for idx, val in enumerate(row):
2204
      if unitfields.Matches(fields[idx]):
2205
        try:
2206
          val = int(val)
2207
        except (TypeError, ValueError):
2208
          pass
2209
        else:
2210
          val = row[idx] = utils.FormatUnit(val, units)
2211
      val = row[idx] = str(val)
2212
      if separator is None:
2213
        mlens[idx] = max(mlens[idx], len(val))
2214

    
2215
  result = []
2216
  if headers:
2217
    args = []
2218
    for idx, name in enumerate(fields):
2219
      hdr = headers[name]
2220
      if separator is None:
2221
        mlens[idx] = max(mlens[idx], len(hdr))
2222
        args.append(mlens[idx])
2223
      args.append(hdr)
2224
    result.append(format_str % tuple(args))
2225

    
2226
  if separator is None:
2227
    assert len(mlens) == len(fields)
2228

    
2229
    if fields and not numfields.Matches(fields[-1]):
2230
      mlens[-1] = 0
2231

    
2232
  for line in data:
2233
    args = []
2234
    if line is None:
2235
      line = ['-' for _ in fields]
2236
    for idx in range(len(fields)):
2237
      if separator is None:
2238
        args.append(mlens[idx])
2239
      args.append(line[idx])
2240
    result.append(format_str % tuple(args))
2241

    
2242
  return result
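
# Illustrative sketch: rendering a small table with a numeric, unit-formatted
# column; the headers, field names and values are made up.
#
#   for line in GenerateTable({"name": "Node", "dfree": "DFree"},
#                             ["name", "dfree"], None,
#                             [["node1", 102400], ["node2", 51200]],
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)
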
def FormatTimestamp(ts):
2246
  """Formats a given timestamp.
2247

2248
  @type ts: timestamp
2249
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2250

2251
  @rtype: string
2252
  @return: a string with the formatted timestamp
2253

2254
  """
2255
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2256
    return '?'
2257
  sec, usec = ts
2258
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
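
# Illustrative sketch: the input is a (seconds, microseconds) pair; the
# rendered string depends on the local timezone.
#
#   FormatTimestamp((1234567890, 123456))
#   # -> "2009-02-13 23:31:30.123456" (for a UTC timezone)
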
def ParseTimespec(value):
2262
  """Parse a time specification.
2263

2264
  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks
2271

2272
  Without any suffix, the value will be taken to be in seconds.
2273

2274
  """
2275
  value = str(value)
2276
  if not value:
2277
    raise errors.OpPrereqError("Empty time specification passed")
2278
  suffix_map = {
2279
    's': 1,
2280
    'm': 60,
2281
    'h': 3600,
2282
    'd': 86400,
2283
    'w': 604800,
2284
    }
2285
  if value[-1] not in suffix_map:
2286
    try:
2287
      value = int(value)
2288
    except (TypeError, ValueError):
2289
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2290
  else:
2291
    multiplier = suffix_map[value[-1]]
2292
    value = value[:-1]
2293
    if not value: # no data left after stripping the suffix
2294
      raise errors.OpPrereqError("Invalid time specification (only"
2295
                                 " suffix passed)")
2296
    try:
2297
      value = int(value) * multiplier
2298
    except (TypeError, ValueError):
2299
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2300
  return value
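
# Illustrative sketch: a few accepted inputs and their values in seconds.
#
#   ParseTimespec("90")   # -> 90
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
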
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2304
                   filter_master=False):
2305
  """Returns the names of online nodes.
2306

2307
  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.
2309

2310
  @param nodes: if not empty, use only this subset of nodes (minus the
2311
      offline ones)
2312
  @param cl: if not None, luxi client to use
2313
  @type nowarn: boolean
2314
  @param nowarn: by default, this function will output a note with the
2315
      offline nodes that are skipped; if this parameter is True the
2316
      note is not displayed
2317
  @type secondary_ips: boolean
2318
  @param secondary_ips: if True, return the secondary IPs instead of the
2319
      names, useful for doing network traffic over the replication interface
2320
      (if any)
2321
  @type filter_master: boolean
2322
  @param filter_master: if True, do not return the master node in the list
2323
      (useful in coordination with secondary_ips where we cannot check our
2324
      node name against the list)
2325

2326
  """
2327
  if cl is None:
2328
    cl = GetClient()
2329

    
2330
  if secondary_ips:
2331
    name_idx = 2
2332
  else:
2333
    name_idx = 0
2334

    
2335
  if filter_master:
2336
    master_node = cl.QueryConfigValues(["master_node"])[0]
2337
    filter_fn = lambda x: x != master_node
2338
  else:
2339
    filter_fn = lambda _: True
2340

    
2341
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2342
                         use_locking=False)
2343
  offline = [row[0] for row in result if row[1]]
2344
  if offline and not nowarn:
2345
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2346
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
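
# Illustrative sketch: typical calls, assuming "cl" is an existing
# luxi.Client.
#
#   names = GetOnlineNodes([], cl=cl)
#   sips = GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)
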
def _ToStream(stream, txt, *args):
2350
  """Write a message to a stream, bypassing the logging system
2351

2352
  @type stream: file object
2353
  @param stream: the file to which we should write
2354
  @type txt: str
2355
  @param txt: the message
2356

2357
  """
2358
  if args:
2359
    args = tuple(args)
2360
    stream.write(txt % args)
2361
  else:
2362
    stream.write(txt)
2363
  stream.write('\n')
2364
  stream.flush()
2365

    
2366

    
2367
def ToStdout(txt, *args):
2368
  """Write a message to stdout only, bypassing the logging system
2369

2370
  This is just a wrapper over _ToStream.
2371

2372
  @type txt: str
2373
  @param txt: the message
2374

2375
  """
2376
  _ToStream(sys.stdout, txt, *args)
2377

    
2378

    
2379
def ToStderr(txt, *args):
2380
  """Write a message to stderr only, bypassing the logging system
2381

2382
  This is just a wrapper over _ToStream.
2383

2384
  @type txt: str
2385
  @param txt: the message
2386

2387
  """
2388
  _ToStream(sys.stderr, txt, *args)
2389

    
2390

    
2391
class JobExecutor(object):
2392
  """Class which manages the submission and execution of multiple jobs.
2393

2394
  Note that instances of this class should not be reused between
2395
  GetResults() calls.
2396

2397
  """
2398
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2399
    self.queue = []
2400
    if cl is None:
2401
      cl = GetClient()
2402
    self.cl = cl
2403
    self.verbose = verbose
2404
    self.jobs = []
2405
    self.opts = opts
2406
    self.feedback_fn = feedback_fn
2407

    
2408
  def QueueJob(self, name, *ops):
2409
    """Record a job for later submit.
2410

2411
    @type name: string
2412
    @param name: a description of the job, will be used in WaitJobSet
2413
    """
2414
    SetGenericOpcodeOpts(ops, self.opts)
2415
    self.queue.append((name, ops))
2416

    
2417
  def SubmitPending(self, each=False):
2418
    """Submit all pending jobs.
2419

2420
    """
2421
    if each:
2422
      results = []
2423
      for row in self.queue:
2424
        # SubmitJob will remove the success status, but raise an exception if
2425
        # the submission fails, so we'll notice that anyway.
2426
        results.append([True, self.cl.SubmitJob(row[1])])
2427
    else:
2428
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2429
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2430
                                                            self.queue)):
2431
      self.jobs.append((idx, status, data, name))
2432

    
2433
  def _ChooseJob(self):
2434
    """Choose a non-waiting/queued job to poll next.
2435

2436
    """
2437
    assert self.jobs, "_ChooseJob called with empty job list"
2438

    
2439
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2440
    assert result
2441

    
2442
    for job_data, status in zip(self.jobs, result):
2443
      if (isinstance(status, list) and status and
2444
          status[0] in (constants.JOB_STATUS_QUEUED,
2445
                        constants.JOB_STATUS_WAITLOCK,
2446
                        constants.JOB_STATUS_CANCELING)):
2447
        # job is still present and waiting
2448
        continue
2449
      # good candidate found (either running job or lost job)
2450
      self.jobs.remove(job_data)
2451
      return job_data
2452

    
2453
    # no job found
2454
    return self.jobs.pop(0)
2455

    
2456
  def GetResults(self):
2457
    """Wait for and return the results of all jobs.
2458

2459
    @rtype: list
2460
    @return: list of tuples (success, job results), in the same order
2461
        as the submitted jobs; if a job has failed, instead of the result
2462
        there will be the error message
2463

2464
    """
2465
    if not self.jobs:
2466
      self.SubmitPending()
2467
    results = []
2468
    if self.verbose:
2469
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2470
      if ok_jobs:
2471
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2472

    
2473
    # first, remove any non-submitted jobs
2474
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2475
    for idx, _, jid, name in failures:
2476
      ToStderr("Failed to submit job for %s: %s", name, jid)
2477
      results.append((idx, False, jid))
2478

    
2479
    while self.jobs:
2480
      (idx, _, jid, name) = self._ChooseJob()
2481
      ToStdout("Waiting for job %s for %s...", jid, name)
2482
      try:
2483
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2484
        success = True
2485
      except errors.JobLost, err:
2486
        _, job_result = FormatError(err)
2487
        ToStderr("Job %s for %s has been archived, cannot check its result",
2488
                 jid, name)
2489
        success = False
2490
      except (errors.GenericError, luxi.ProtocolError), err:
2491
        _, job_result = FormatError(err)
2492
        success = False
2493
        # the error message will always be shown, verbose or not
2494
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2495

    
2496
      results.append((idx, success, job_result))
2497

    
2498
    # sort based on the index, then drop it
2499
    results.sort()
2500
    results = [i[1:] for i in results]
2501

    
2502
    return results
2503

    
2504
  def WaitOrShow(self, wait):
2505
    """Wait for job results or only print the job IDs.
2506

2507
    @type wait: boolean
2508
    @param wait: whether to wait or not
2509

2510
    """
2511
    if wait:
2512
      return self.GetResults()
2513
    else:
2514
      if not self.jobs:
2515
        self.SubmitPending()
2516
      for _, status, result, name in self.jobs:
2517
        if status:
2518
          ToStdout("%s: %s", result, name)
2519
        else:
2520
          ToStderr("Failure for %s: %s", name, result)
2521
      return [row[1:3] for row in self.jobs]
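
# Illustrative sketch: batching several jobs through JobExecutor; the job
# names and opcodes ("op1", "op2") as well as "opts" are placeholders.
#
#   jex = JobExecutor(opts=opts)
#   for name, op in [("inst1", op1), ("inst2", op2)]:
#     jex.QueueJob(name, op)
#   jex.WaitOrShow(not opts.submit_only)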