lib/cli.py @ f4484122
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ALLOCATABLE_OPT",
  "ALL_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "CLEANUP_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "MAC_PREFIX_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "NET_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "USEUNITS_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """

class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """optparse's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict


def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval


def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


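# Illustrative sketch (editor addition, not part of the original module):
# how the parsers above behave. The sample option strings are hypothetical.
#
#   check_ident_key_val(None, "--net", "0:ip=1.2.3.4,no_link")
#   => ("0", {"ip": "1.2.3.4", "link": False})
#
#   check_key_val(None, "-B", "memory=512,no_auto_balance,-kernel_path")
#   => {"memory": "512", "auto_balance": False, "kernel_path": None}

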
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES) = range(100, 106)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


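# Illustrative sketch (editor addition): defining an option with one of the
# custom types registered above. The option name below is hypothetical.
#
#   EXAMPLE_OPT = cli_option("--example-size", dest="example_size",
#                            type="unit", default=None,
#                            help="Size accepting k/m/g/t suffixes")
#
# With type="unit", optparse routes the raw string through check_unit(), so
# "--example-size 10g" arrives in opts.example_size as an integer in MiB.

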
_YESNO = ("yes", "no")
_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

_DRY_RUN_OPT = cli_option("--dry-run", default=False,
                          action="store_true",
                          help=("Do not execute the operation, just run the"
                                " check steps and verify if it could be"
                                " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

|
667 |
STATIC_OPT = cli_option("-s", "--static", dest="static", |
668 |
action="store_true", default=False, |
669 |
help="Only show configuration data, not runtime data")
|
670 |
|
671 |
ALL_OPT = cli_option("--all", dest="show_all", |
672 |
default=False, action="store_true", |
673 |
help="Show info on all instances on the cluster."
|
674 |
" This can take a long time to run, use wisely")
|
675 |
|
676 |
SELECT_OS_OPT = cli_option("--select-os", dest="select_os", |
677 |
action="store_true", default=False, |
678 |
help="Interactive OS reinstall, lists available"
|
679 |
" OS templates for selection")
|
680 |
|
681 |
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures", |
682 |
action="store_true", default=False, |
683 |
help="Remove the instance from the cluster"
|
684 |
" configuration even if there are failures"
|
685 |
" during the removal process")
|
686 |
|
687 |
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", |
688 |
help="Specifies the new secondary node",
|
689 |
metavar="NODE", default=None, |
690 |
completion_suggest=OPT_COMPL_ONE_NODE) |
691 |
|
692 |
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", |
693 |
default=False, action="store_true", |
694 |
help="Replace the disk(s) on the primary"
|
695 |
" node (only for the drbd template)")
|
696 |
|
697 |
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", |
698 |
default=False, action="store_true", |
699 |
help="Replace the disk(s) on the secondary"
|
700 |
" node (only for the drbd template)")
|
701 |
|
702 |
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", |
703 |
default=False, action="store_true", |
704 |
help="Automatically replace faulty disks"
|
705 |
" (only for the drbd template)")
|
706 |
|
707 |
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", |
708 |
default=False, action="store_true", |
709 |
help="Ignore current recorded size"
|
710 |
" (useful for forcing activation when"
|
711 |
" the recorded size is wrong)")
|
712 |
|
713 |
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node", |
714 |
metavar="<node>",
|
715 |
completion_suggest=OPT_COMPL_ONE_NODE) |
716 |
|
717 |
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory", |
718 |
metavar="<dir>")
|
719 |
|
720 |
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip", |
721 |
help="Specify the secondary ip for the node",
|
722 |
metavar="ADDRESS", default=None) |
723 |
|
724 |
READD_OPT = cli_option("--readd", dest="readd", |
725 |
default=False, action="store_true", |
726 |
help="Readd old node after replacing it")
|
727 |
|
728 |
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", |
729 |
default=True, action="store_false", |
730 |
help="Disable SSH key fingerprint checking")
|
731 |
|
732 |
|
733 |
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", |
734 |
choices=_YESNO, default=None, metavar=_YORNO,
|
735 |
help="Set the master_candidate flag on the node")
|
736 |
|
737 |
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, |
738 |
choices=_YESNO, default=None,
|
739 |
help="Set the offline flag on the node")
|
740 |
|
741 |
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, |
742 |
choices=_YESNO, default=None,
|
743 |
help="Set the drained flag on the node")
|
744 |
|
745 |
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", |
746 |
choices=_YESNO, default=None, metavar=_YORNO,
|
747 |
help="Set the allocatable flag on a volume")
|
748 |
|
749 |
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage", |
750 |
help="Disable support for lvm based instances"
|
751 |
" (cluster-wide)",
|
752 |
action="store_false", default=True) |
753 |
|
754 |
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
|
755 |
dest="enabled_hypervisors",
|
756 |
help="Comma-separated list of hypervisors",
|
757 |
type="string", default=None) |
758 |
|
759 |
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams", |
760 |
type="keyval", default={},
|
761 |
help="NIC parameters")
|
762 |
|
763 |
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, |
764 |
dest="candidate_pool_size", type="int", |
765 |
help="Set the candidate pool size")
|
766 |
|
767 |
VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name", |
768 |
help="Enables LVM and specifies the volume group"
|
769 |
" name (cluster-wide) for disk allocation [xenvg]",
|
770 |
metavar="VG", default=None) |
771 |
|
772 |
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it", |
773 |
help="Destroy cluster", action="store_true") |
774 |
|
775 |
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", |
776 |
help="Skip node agreement check (dangerous)",
|
777 |
action="store_true", default=False) |
778 |
|
779 |
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", |
780 |
help="Specify the mac prefix for the instance IP"
|
781 |
" addresses, in the format XX:XX:XX",
|
782 |
metavar="PREFIX",
|
783 |
default=None)
|
784 |
|
785 |
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", |
786 |
help="Specify the node interface (cluster-wide)"
|
787 |
" on which the master IP address will be added "
|
788 |
" [%s]" % constants.DEFAULT_BRIDGE,
|
789 |
metavar="NETDEV",
|
790 |
default=constants.DEFAULT_BRIDGE) |
791 |
|
792 |
|
793 |
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", |
794 |
help="Specify the default directory (cluster-"
|
795 |
"wide) for storing the file-based disks [%s]" %
|
796 |
constants.DEFAULT_FILE_STORAGE_DIR, |
797 |
metavar="DIR",
|
798 |
default=constants.DEFAULT_FILE_STORAGE_DIR) |
799 |
|
800 |
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", |
801 |
help="Don't modify /etc/hosts",
|
802 |
action="store_false", default=True) |
803 |
|
804 |
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup", |
805 |
help="Don't initialize SSH keys",
|
806 |
action="store_false", default=True) |
807 |
|
808 |
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes", |
809 |
help="Enable parseable error messages",
|
810 |
action="store_true", default=False) |
811 |
|
812 |
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem", |
813 |
help="Skip N+1 memory redundancy tests",
|
814 |
action="store_true", default=False) |
815 |
|
816 |
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type", |
817 |
help="Type of reboot: soft/hard/full",
|
818 |
default=constants.INSTANCE_REBOOT_HARD, |
819 |
metavar="<REBOOT>",
|
820 |
choices=list(constants.REBOOT_TYPES))
|
821 |
|
822 |
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
|
823 |
dest="ignore_secondaries",
|
824 |
default=False, action="store_true", |
825 |
help="Ignore errors from secondaries")
|
826 |
|
827 |
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown", |
828 |
action="store_false", default=True, |
829 |
help="Don't shutdown the instance (unsafe)")
|
830 |
|
831 |
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int", |
832 |
default=constants.DEFAULT_SHUTDOWN_TIMEOUT, |
833 |
help="Maximum time to wait")
|
834 |
|
835 |
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
|
836 |
dest="shutdown_timeout", type="int", |
837 |
default=constants.DEFAULT_SHUTDOWN_TIMEOUT, |
838 |
help="Maximum time to wait for instance shutdown")
|
839 |
|
840 |
EARLY_RELEASE_OPT = cli_option("--early-release",
|
841 |
dest="early_release", default=False, |
842 |
action="store_true",
|
843 |
help="Release the locks on the secondary"
|
844 |
" node(s) early")
|
845 |
|
846 |
|
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti) %s", binary, constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT],
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


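# Illustrative sketch (editor addition): the shape of the "commands"
# dictionary expected by _ParseArgs/GenericMain, as implied by the unpacking
# above (func, args_def, options, usage, description). The command name and
# option list below are hypothetical.
#
#   commands = {
#     "list": (ListTags, ARGS_NONE, [NOHDR_OPT], "",
#              "Lists the tags of the cluster"),
#     }
#   aliases = {"show": "list"}

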
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


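# Illustrative sketch (editor addition): how the argument definitions above
# are checked. A command declared with ARGS_ONE_NODE accepts exactly one
# value, while [ArgNode(min=1)] accepts one or more. The command name and
# hostname are hypothetical.
#
#   _CheckArguments("power-on", ARGS_ONE_NODE, ["node1.example.com"])  # True
#   _CheckArguments("power-on", ARGS_ONE_NODE, [])                     # False

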
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]


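# Illustrative sketch (editor addition): sample inputs/outputs for the two
# helpers above (node and OS names are made up).
#
#   SplitNodeOption("node1:node2")  => ["node1", "node2"]
#   SplitNodeOption("node1")        => ("node1", None)
#   CalculateOSNames("debootstrap", ["etch", "lenny"])
#     => ["debootstrap+etch", "debootstrap+lenny"]

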
def UsesRPC(fn):
  def wrapper(*args, **kwargs):
    rpc.Init()
    try:
      return fn(*args, **kwargs)
    finally:
      rpc.Shutdown()
  return wrapper


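# Illustrative sketch (editor addition): UsesRPC is meant to be applied as a
# decorator around CLI functions that need the RPC layer initialized for the
# duration of the call. The decorated function below is hypothetical.
#
#   @UsesRPC
#   def InitSomething(opts, args):
#     ...  # rpc.Init() has run; rpc.Shutdown() runs when this returns

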
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


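# Illustrative sketch (editor addition): typical use of AskUser() from a
# command implementation; the prompt text is made up.
#
#   usertext = "This will reboot instance inst1. Continue?"
#   if not opts.confirm and not AskUser(usertext):
#     return 1

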
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def PollJob(job_id, cl=None, feedback_fn=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  notified_queued = False
  notified_waitlock = False

  while True:
    result = cl.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                     prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)
    elif result == constants.JOB_NOTCHANGED:
      if status is not None and not callable(feedback_fn):
        if status == constants.JOB_STATUS_QUEUED and not notified_queued:
          ToStderr("Job %s is waiting in queue", job_id)
          notified_queued = True
        elif status == constants.JOB_STATUS_WAITLOCK and not notified_waitlock:
          ToStderr("Job %s is trying to acquire all necessary locks", job_id)
          notified_waitlock = True

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, _, message) = log_entry
        if callable(feedback_fn):
          feedback_fn(log_entry[1:])
        else:
          encoded = utils.SafeEncode(message)
          ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), encoded)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cl.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]
  if status == constants.JOB_STATUS_SUCCESS:
    return result
  elif status in (constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")
  else:
    has_ok = False
    for idx, (status, msg) in enumerate(zip(opstatus, result)):
      if status == constants.OP_STATUS_SUCCESS:
        has_ok = True
      elif status == constants.OP_STATUS_ERROR:
        errors.MaybeRaise(msg)
        if has_ok:
          raise errors.OpExecError("partial failure (opcode %d): %s" %
                                   (idx, msg))
        else:
          raise errors.OpExecError(str(msg))
    # default failure mode
    raise errors.OpExecError(result)


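# Illustrative sketch (editor addition): submitting a job by hand and then
# waiting for it, which mirrors what SubmitOpCode() below does. The tag value
# is made up; opcodes.OpAddTags is the opcode already used by AddTags() above.
#
#   cl = GetClient()
#   op = opcodes.OpAddTags(kind=constants.TAG_CLUSTER,
#                          name=constants.TAG_CLUSTER, tags=["example"])
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)  # one result per opcode in the job

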
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.dry_run = options.dry_run
    op.debug_level = options.debug


def GetClient(): |
1267 |
# TODO: Cache object?
|
1268 |
try:
|
1269 |
client = luxi.Client() |
1270 |
except luxi.NoMasterError:
|
1271 |
ss = ssconf.SimpleStore() |
1272 |
|
1273 |
# Try to read ssconf file
|
1274 |
try:
|
1275 |
ss.GetMasterNode() |
1276 |
except errors.ConfigurationError:
|
1277 |
raise errors.OpPrereqError("Cluster not initialized or this machine is" |
1278 |
" not part of a cluster")
|
1279 |
|
1280 |
master, myself = ssconf.GetMasterAndMyself(ss=ss) |
1281 |
if master != myself:
|
1282 |
raise errors.OpPrereqError("This is not the master node, please connect" |
1283 |
" to node '%s' and rerun the command" %
|
1284 |
master) |
1285 |
raise
|
1286 |
return client
|
1287 |
|
1288 |
|
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = utils.HostInfo.SysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


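# Illustrative sketch (editor addition): FormatError is what GenericMain
# (below) uses to turn exceptions into an exit code plus message, e.g.:
#
#   try:
#     ...
#   except errors.GenericError, err:
#     retcode, message = FormatError(err)
#     ToStderr(message)
#     sys.exit(retcode)

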
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    try:
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
    nics = [{}] * nic_max
    for nidx, ndict in opts.nics:
      nidx = int(nidx)
      if not isinstance(ndict, dict):
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
        raise errors.OpPrereqError(msg)
      nics[nidx] = ndict
  elif opts.no_nics:
    # no nics
    nics = []
  else:
    # default of one nic, all auto
    nics = [{}]

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if not opts.disks and not opts.sd_size:
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]
    try:
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" not in ddict:
        raise errors.OpPrereqError("Missing size for disk %d" % didx)
      try:
        ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                   (didx, err))
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    src_node = None
    src_path = None
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    src_node = opts.src_node
    src_path = opts.src_dir
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                src_node=src_node,
                                src_path=src_path)

  SubmitOrSend(op, opts)
  return 0


def GenerateTable(headers, fields, separator, data, |
1538 |
numfields=None, unitfields=None, |
1539 |
units=None):
|
1540 |
"""Prints a table with headers and different fields.
|
1541 |
|
1542 |
@type headers: dict
|
1543 |
@param headers: dictionary mapping field names to headers for
|
1544 |
the table
|
1545 |
@type fields: list
|
1546 |
@param fields: the field names corresponding to each row in
|
1547 |
the data field
|
1548 |
@param separator: the separator to be used; if this is None,
|
1549 |
the default 'smart' algorithm is used which computes optimal
|
1550 |
field width, otherwise just the separator is used between
|
1551 |
each field
|
1552 |
@type data: list
|
1553 |
@param data: a list of lists, each sublist being one row to be output
|
1554 |
@type numfields: list
|
1555 |
@param numfields: a list with the fields that hold numeric
|
1556 |
values and thus should be right-aligned
|
1557 |
@type unitfields: list
|
1558 |
@param unitfields: a list with the fields that hold numeric
|
1559 |
values that should be formatted with the units field
|
1560 |
@type units: string or None
|
1561 |
@param units: the units we should use for formatting, or None for
|
1562 |
automatic choice (human-readable for non-separator usage, otherwise
|
1563 |
megabytes); this is a one-letter string
|
1564 |
|
1565 |
"""
|
1566 |
if units is None: |
1567 |
if separator:
|
1568 |
units = "m"
|
1569 |
else:
|
1570 |
units = "h"
|
1571 |
|
1572 |
if numfields is None: |
1573 |
numfields = [] |
1574 |
if unitfields is None: |
1575 |
unitfields = [] |
1576 |
|
1577 |
numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
|
1578 |
unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
|
1579 |
|
1580 |
format_fields = [] |
1581 |
for field in fields: |
1582 |
if headers and field not in headers: |
1583 |
# TODO: handle better unknown fields (either revert to old
|
1584 |
# style of raising exception, or deal more intelligently with
|
1585 |
# variable fields)
|
1586 |
headers[field] = field |
1587 |
if separator is not None: |
1588 |
format_fields.append("%s")
|
1589 |
elif numfields.Matches(field):
|
1590 |
format_fields.append("%*s")
|
1591 |
else:
|
1592 |
format_fields.append("%-*s")
|
1593 |
|
1594 |
if separator is None: |
1595 |
mlens = [0 for name in fields] |
1596 |
format = ' '.join(format_fields)
|
1597 |
else:
|
1598 |
format = separator.replace("%", "%%").join(format_fields) |
1599 |
|
1600 |
for row in data: |
1601 |
if row is None: |
1602 |
continue
|
1603 |
for idx, val in enumerate(row): |
1604 |
if unitfields.Matches(fields[idx]):
|
1605 |
try:
|
1606 |
val = int(val)
|
1607 |
except (TypeError, ValueError): |
1608 |
pass
|
1609 |
else:
|
1610 |
val = row[idx] = utils.FormatUnit(val, units) |
1611 |
val = row[idx] = str(val)
|
1612 |
if separator is None: |
1613 |
mlens[idx] = max(mlens[idx], len(val)) |
1614 |
|
1615 |
result = [] |
1616 |
if headers:
|
1617 |
args = [] |
1618 |
for idx, name in enumerate(fields): |
1619 |
hdr = headers[name] |
1620 |
if separator is None: |
1621 |
mlens[idx] = max(mlens[idx], len(hdr)) |
1622 |
args.append(mlens[idx]) |
1623 |
args.append(hdr) |
1624 |
result.append(format % tuple(args))
|
1625 |
|
1626 |
if separator is None: |
1627 |
assert len(mlens) == len(fields) |
1628 |
|
1629 |
if fields and not numfields.Matches(fields[-1]): |
1630 |
mlens[-1] = 0 |
1631 |
|
1632 |
for line in data: |
1633 |
args = [] |
1634 |
if line is None: |
1635 |
line = ['-' for _ in fields] |
1636 |
for idx in range(len(fields)): |
1637 |
if separator is None: |
1638 |
args.append(mlens[idx]) |
1639 |
args.append(line[idx]) |
1640 |
result.append(format % tuple(args))
|
1641 |
|
1642 |
return result
|
1643 |
|
1644 |
|
1645 |
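
# Example (illustrative only; the field names and data below are made up):
# building a human-readable two-column listing where "size" is numeric and
# unit-formatted, then emitting it with ToStdout.
#
#   headers = {"name": "Instance", "size": "DiskSize"}
#   data = [["instance1.example.com", 10240],
#           ["instance2.example.com", 512]]
#   for line in GenerateTable(headers, ["name", "size"], None, data,
#                             numfields=["size"], unitfields=["size"]):
#     ToStdout(line)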


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
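
# Examples (illustrative): job and opcode timestamps are (seconds,
# microseconds) tuples; anything else formats as "?".
#
#   FormatTimestamp((1234567890, 123456))
#   # -> e.g. "2009-02-13 23:31:30.123456" (depends on the local timezone)
#   FormatTimestamp("bogus")
#   # -> "?"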


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value:  # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
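
# Examples (illustrative): the return value is always a number of seconds.
#
#   ParseTimespec("30")   # -> 30
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("h")    # raises errors.OpPrereqError (suffix only)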


def GetOnlineNodes(nodes, cl=None, nowarn=False):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that have been skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed

  """
  if cl is None:
    cl = GetClient()

  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[0] for row in result if not row[1]]
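
# Usage sketch (assumption: "selected_nodes" was parsed from the command
# line): restrict an operation to nodes that are not marked offline, while
# silencing the stderr note about skipped nodes.
#
#   node_names = GetOnlineNodes(selected_nodes, cl=GetClient(), nowarn=True)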


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
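
# Examples (illustrative): both helpers accept optional %-style arguments,
# which are only interpolated when present, so a plain message does not need
# its percent signs escaped.
#
#   ToStdout("Instance %s has %d disk(s)", "instance1.example.com", 2)
#   ToStderr("Progress: 100% (no arguments, '%' is passed through verbatim)")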


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used when reporting
        the job's submission and results

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self):
    """Submit all pending jobs.

    """
    results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for ((status, data), (name, _)) in zip(results, self.queue):
      self.jobs.append((status, data, name))

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[1] for row in self.jobs if row[0]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
    for submit_status, jid, name in self.jobs:
      if not submit_status:
        ToStderr("Failed to submit job for %s: %s", name, jid)
        results.append((False, jid))
        continue
      if self.verbose:
        ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl)
        success = True
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((success, job_result))
    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
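
# Usage sketch (illustrative; "opts" and the chosen opcode are assumptions,
# normally supplied by the calling client script): queue one job per
# instance, submit them in a single SubmitManyJobs call and wait for all of
# them, collecting the failures.
#
#   jex = JobExecutor(opts=opts)
#   for name in ["instance1.example.com", "instance2.example.com"]:
#     jex.QueueJob(name, opcodes.OpShutdownInstance(instance_name=name))
#   bad = [msg for (success, msg) in jex.GetResults() if not success]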