root / lib / utils / __init__.py @ 95e7e85e
History | View | Annotate | Download (23.9 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Ganeti utility module.
|
23 |
|
24 |
This module holds functions that can be used in both daemons (all) and
|
25 |
the command line scripts.
|
26 |
|
27 |
"""
|
28 |
|
29 |
# Allow wildcard import in pylint: disable=W0401
|
30 |
|
31 |
import os |
32 |
import re |
33 |
import errno |
34 |
import pwd |
35 |
import time |
36 |
import itertools |
37 |
import select |
38 |
import logging |
39 |
import signal |
40 |
|
41 |
from ganeti import errors |
42 |
from ganeti import constants |
43 |
from ganeti import compat |
44 |
from ganeti import pathutils |
45 |
|
46 |
from ganeti.utils.algo import * |
47 |
from ganeti.utils.filelock import * |
48 |
from ganeti.utils.hash import * |
49 |
from ganeti.utils.io import * |
50 |
from ganeti.utils.log import * |
51 |
from ganeti.utils.lvm import * |
52 |
from ganeti.utils.mlock import * |
53 |
from ganeti.utils.nodesetup import * |
54 |
from ganeti.utils.process import * |
55 |
from ganeti.utils.retry import * |
56 |
from ganeti.utils.storage import * |
57 |
from ganeti.utils.text import * |
58 |
from ganeti.utils.wrapper import * |
59 |
from ganeti.utils.x509 import * |
60 |
|
61 |
|
62 |
# Service names: letters, digits, dots, dashes and underscores only,
# between 1 and 128 characters (the pattern is anchored on both ends)
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Pre-compiled form of the project-wide UUID regular expression
UUID_RE = re.compile(constants.UUID_REGEX)
65 |
|
66 |
|
67 |
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  Values are coerced in place: boolean-like strings become real booleans,
  sizes are parsed via ParseUnit, integers via C{int()}.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values
  @raise errors.TypeEnforcementError: if a value cannot be coerced
  @raise errors.ProgrammerError: if a requested type is not enforceable

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown parameter '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values are accepted as-is, bypassing coercion
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # "Maybe string" additionally accepts None as-is
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # A literal False is coerced to the empty string...
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ""
        else:
          # ...any other non-string value is rejected
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      # Non-empty strings must spell out the true/false constant values
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      # Anything else (including the empty string) uses plain truthiness
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
|
134 |
|
135 |
|
136 |
def ValidateServiceName(name):
  """Check that a service name or port specification is well-formed.

  @type name: number or string
  @param name: Service name or port specification
  @raise errors.OpPrereqError: if the name is not valid
  @return: the unmodified C{name}

  """
  try:
    port = int(name)
  except (ValueError, TypeError):
    # Not a number, so treat it as a symbolic service name
    ok = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port; must fit the 16-bit port range (protocols other than
    # TCP or UDP might need adjustments here)
    ok = 0 <= port < (1 << 16)

  if not ok:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name
|
158 |
|
159 |
|
160 |
def _ComputeMissingKeys(key_path, options, defaults): |
161 |
"""Helper functions to compute which keys a invalid.
|
162 |
|
163 |
@param key_path: The current key path (if any)
|
164 |
@param options: The user provided options
|
165 |
@param defaults: The default dictionary
|
166 |
@return: A list of invalid keys
|
167 |
|
168 |
"""
|
169 |
defaults_keys = frozenset(defaults.keys())
|
170 |
invalid = [] |
171 |
for key, value in options.items(): |
172 |
if key_path:
|
173 |
new_path = "%s/%s" % (key_path, key)
|
174 |
else:
|
175 |
new_path = key |
176 |
|
177 |
if key not in defaults_keys: |
178 |
invalid.append(new_path) |
179 |
elif isinstance(value, dict): |
180 |
invalid.extend(_ComputeMissingKeys(new_path, value, defaults[key])) |
181 |
|
182 |
return invalid
|
183 |
|
184 |
|
185 |
def VerifyDictOptions(options, defaults):
  """Check that all keys of a dict exist in a dict of defaults.

  @param options: The user provided options
  @param defaults: The default dictionary
  @raise error.OpPrereqError: If one of the keys is not supported

  """
  unknown = _ComputeMissingKeys("", options, defaults)

  if unknown:
    raise errors.OpPrereqError("Provided option keys not supported: %s" %
                               CommaJoin(unknown), errors.ECODE_INVAL)
198 |
|
199 |
|
200 |
def ListVolumeGroups():
  """List volume groups and their size.

  @rtype: dict
  @return: dictionary with keys volume name and values the size of the
      volume; empty if running "vgs" failed

  """
  vgs = {}
  result = RunCmd("vgs --noheadings --units m --nosuffix -o name,size")
  if result.failed:
    # Best effort: callers get an empty dict if the command failed
    return vgs

  for line in result.stdout.splitlines():
    try:
      (name, size) = line.split()
      size = int(float(size))
    except (IndexError, ValueError) as err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue
    vgs[name] = size

  return vgs
|
226 |
|
227 |
|
228 |
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.

  Relies on sysfs: a Linux bridge device always exposes a "bridge"
  subdirectory under its /sys/class/net entry.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
238 |
|
239 |
|
240 |
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    # Conversion failed; fall back to the unconverted input
    return val
|
260 |
|
261 |
|
262 |
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs

  """
  if not cpu_mask:
    return []

  def _ParseBound(text, side):
    # Convert one range boundary, wrapping conversion errors
    try:
      return int(text)
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for %s boundary of"
                              " CPU ID range: %s" % (side, str(err)))

  cpu_ids = []
  for range_def in cpu_mask.split(","):
    bounds = range_def.split("-")
    if len(bounds) > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    lower = _ParseBound(bounds[0], "lower")
    # For a single ID, bounds[-1] is the same element as bounds[0]
    higher = _ParseBound(bounds[-1], "higher")
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_ids.extend(range(lower, higher + 1))

  return cpu_ids
|
299 |
|
300 |
|
301 |
def ParseMultiCpuMask(cpu_mask):
  """Parse a multiple CPU mask definition and return the list of CPU IDs.

  CPU mask format: colon-separated list of comma-separated list of CPU IDs
  or dash-separated ID ranges, with optional "all" as CPU value
  Example: "0-2,5:all:1,5,6:2" -> [ [ 0,1,2,5 ], [ -1 ], [ 1, 5, 6 ], [ 2 ] ]

  @type cpu_mask: str
  @param cpu_mask: multiple CPU mask definition
  @rtype: list of lists of int
  @return: list of lists of CPU IDs

  """
  if not cpu_mask:
    return []

  masks = []
  for part in cpu_mask.split(constants.CPU_PINNING_SEP):
    if part == constants.CPU_PINNING_ALL:
      # "all" is represented by the special pinning value
      masks.append([constants.CPU_PINNING_ALL_VAL])
    else:
      # Uniquify and sort the list before adding
      masks.append(sorted(set(ParseCpuMask(part))))

  return masks
|
325 |
|
326 |
|
327 |
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  C{default} argument is returned, which defaults to C{None}.

  @param user: user name or numeric uid
  @param default: value returned when the user cannot be found
  @return: the user's home directory, or C{default}

  """
  # Select the lookup function according to the argument type
  if isinstance(user, basestring):
    lookup_fn = pwd.getpwnam
  elif isinstance(user, (int, long)):
    lookup_fn = pwd.getpwuid
  else:
    raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                 type(user))

  try:
    entry = lookup_fn(user)
  except KeyError:
    # Unknown user
    return default

  return entry.pw_dir
|
346 |
|
347 |
|
348 |
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  candidate = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > candidate:
      # A gap was found before this element
      return candidate
    candidate += 1
  # The sequence is dense; no free index within it
  return None
374 |
|
375 |
|
376 |
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # Also accept exceptional conditions so callers are woken up on
  # errors/hangups even when they only asked for the plain event
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    # An interrupted poll is reported like a timeout (empty event list)
    io_events = []
  # Only a single descriptor was registered, so the first entry is ours
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None
413 |
|
414 |
|
415 |
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    # Remaining time; updated between attempts via UpdateTimeout
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Poll once and request a retry if no condition occurred yet."""
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Record the new remaining timeout."""
    self.timeout = timeout
|
436 |
|
437 |
|
438 |
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is not None:
    # Let the Retry machinery re-poll after interruptions; the helper is
    # told the remaining time before each attempt via UpdateTimeout
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    # No timeout given: keep polling (potentially forever) until a
    # condition actually occurs
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result
|
465 |
|
466 |
|
467 |
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: string
  @param name: daemon name
  @rtype: bool
  @return: True if the daemon is running, False if it could not be started

  """
  result = RunCmd([pathutils.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
478 |
|
479 |
|
480 |
def StopDaemon(name):
  """Stop daemon

  @type name: string
  @param name: daemon name
  @rtype: bool
  @return: True if the daemon was stopped successfully, False otherwise

  """
  result = RunCmd([pathutils.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
491 |
|
492 |
|
493 |
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # Work in whole microseconds to sidestep float representation issues
  total_micro = int(value * 1000000)
  (seconds, microseconds) = divmod(total_micro, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
509 |
|
510 |
|
511 |
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  # Scale microseconds back into fractional seconds
  return float(secs) + (float(usecs) * 0.000001)
527 |
|
528 |
|
529 |
def EpochNano():
  """Return the current timestamp expressed as number of nanoseconds since the
  unix epoch

  @return: nanoseconds since the Unix epoch

  """
  # time.time() gives seconds as a float; scale up to nanoseconds
  now = time.time()
  return int(now * 1000000000)
537 |
|
538 |
|
539 |
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  try:
    # A direct key hit takes precedence over regular expressions
    return (data[name], [])
  except KeyError:
    pass

  for (key, value) in data.items():
    # Compiled regex objects are recognized by their "match" method
    if hasattr(key, "match"):
      m = key.match(name)
      if m:
        return (value, list(m.groups()))

  return None
564 |
|
565 |
|
566 |
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  entries = []
  for line in ReadFile(filename).splitlines():
    # Everything after the fourth field (dump/pass numbers) is ignored
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    entries.append((device, mountpoint, fstype, options))

  return entries
|
584 |
|
585 |
|
586 |
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert "signal_handlers" not in kwargs or \
             kwargs["signal_handlers"] is None or \
             isinstance(kwargs["signal_handlers"], dict), \
             "Wrong signal_handlers parameter in original function call"
      # Reuse the dict from a stacked decorator, otherwise create one
      handlers = kwargs.get("signal_handlers")
      if handlers is None:
        handlers = {}
        kwargs["signal_handlers"] = handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the original handlers, even on exceptions
        sighandler.Reset()
    return sig_function
  return wrap
|
621 |
|
622 |
|
623 |
def TimeoutExpired(epoch, timeout, _time_fn=time.time):
  """Checks whether a timeout has expired.

  @param epoch: Starting time (seconds since the epoch)
  @param timeout: Amount of allowed time, in seconds
  @param _time_fn: Time function, should only be overridden for tests
  @return: Whether strictly more than C{timeout} seconds passed since
      C{epoch}

  """
  deadline = epoch + timeout
  return _time_fn() > deadline
|
628 |
|
629 |
|
630 |
class SignalWakeupFd(object):
  """Wrapper around C{signal.set_wakeup_fd} using a self-managed pipe.

  The read end is exposed via L{fileno}/L{read} so poll/select-based
  main loops can be woken up when a signal arrives.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported

    def _SetWakeupFd(self, _): # pylint: disable=R0201
      # No-op fallback; -1 mimics set_wakeup_fd's "no previous fd" result
      return -1
  else:

    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Remember the previously installed wakeup fd for later restoration
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # The hasattr check guards against __del__ running on a partially
    # constructed instance (e.g. when __init__ failed early)
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
|
682 |
|
683 |
|
684 |
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: object with a C{Notify} method (e.g. L{SignalWakeupFd})
        notified whenever one of the signals arrives

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # Maps signal number to the handler installed before ours
    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # NOTE: under Python 2, items() returns a list copy, making it safe to
    # delete entries from the dict while iterating
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
|
769 |
|
770 |
|
771 |
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Anchor each field so that it must match the whole string
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Return the first successful match, if any
    for regex in self.items:
      m = regex.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [field for field in items if not self.Matches(field)]
811 |
|
812 |
|
813 |
def ValidateDeviceNames(kind, container):
  """Validate instance device names.

  Check that a device container contains only unique and valid names.

  @type kind: string
  @param kind: One-word item description ("NIC" or "disk")
  @type container: list
  @param container: Container containing the devices
  @raise errors.OpPrereqError: if a name is purely numeric, duplicated,
      or the container kind is unknown

  """

  # Accumulates both device UUIDs and already-seen names
  valid = []
  for device in container:
    if isinstance(device, dict):
      # Parameter dicts carry the name under a kind-specific constant key
      if kind == "NIC":
        name = device.get(constants.INIC_NAME, None)
      elif kind == "disk":
        name = device.get(constants.IDISK_NAME, None)
      else:
        raise errors.OpPrereqError("Invalid container kind '%s'" % kind,
                                   errors.ECODE_INVAL)
    else:
      # Device objects expose the name as an attribute
      name = device.name
      # Check that a device name is not the UUID of another device
      valid.append(device.uuid)

    try:
      int(name)
    except (ValueError, TypeError):
      pass
    else:
      # int() succeeded, so the name is purely numeric and rejected
      raise errors.OpPrereqError("Invalid name '%s'. Purely numeric %s names"
                                 " are not allowed" % (name, kind),
                                 errors.ECODE_INVAL)

    # Unset names and the special "none" value are exempt from uniqueness
    if name is not None and name.lower() != constants.VALUE_NONE:
      if name in valid:
        raise errors.OpPrereqError("%s name '%s' already used" % (kind, name),
                                   errors.ECODE_NOTUNIQUE)
      else:
        valid.append(name)