root / lib / utils / __init__.py @ f8326fca
History | View | Annotate | Download (22 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Ganeti utility module.
|
23 |
|
24 |
This module holds functions that can be used in both daemons (all) and
|
25 |
the command line scripts.
|
26 |
|
27 |
"""
|
28 |
|
29 |
# Allow wildcard import in pylint: disable=W0401
|
30 |
|
31 |
import os |
32 |
import re |
33 |
import errno |
34 |
import pwd |
35 |
import time |
36 |
import itertools |
37 |
import select |
38 |
import logging |
39 |
import signal |
40 |
|
41 |
from ganeti import errors |
42 |
from ganeti import constants |
43 |
from ganeti import compat |
44 |
|
45 |
from ganeti.utils.algo import * |
46 |
from ganeti.utils.filelock import * |
47 |
from ganeti.utils.hash import * |
48 |
from ganeti.utils.io import * |
49 |
from ganeti.utils.log import * |
50 |
from ganeti.utils.mlock import * |
51 |
from ganeti.utils.nodesetup import * |
52 |
from ganeti.utils.process import * |
53 |
from ganeti.utils.retry import * |
54 |
from ganeti.utils.text import * |
55 |
from ganeti.utils.wrapper import * |
56 |
from ganeti.utils.x509 import * |
57 |
|
58 |
|
59 |
# Accepted non-numeric service names: 1 to 128 characters drawn from
# letters, digits, dash, underscore and dot (see ValidateServiceName)
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Lowercase hexadecimal UUID in the canonical 8-4-4-4-12 grouping;
# note that uppercase hex digits are deliberately not accepted
UUID_RE = re.compile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-"
                     "[a-f0-9]{4}-[a-f0-9]{12}$")
|
63 |
|
64 |
|
65 |
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  The dict is modified in place: string/boolean/size/integer values are
  coerced to their canonical representation, and a
  L{errors.TypeEnforcementError} is raised for anything that cannot be
  coerced.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
      in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown parameter '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass all type enforcement
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        # "maybe string" explicitly allows None
        pass
      elif not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          # False is accepted as a stand-in for the empty string
          target[key] = ""
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        # Non-empty strings are compared against the canonical true/false
        # spellings; anything else is rejected
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      # FIX: use the "except ... as err" form (valid on Python 2.6+ and 3.x)
      # instead of the Python-2-only comma syntax
      except errors.UnitParseError as err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
|
132 |
|
133 |
|
134 |
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @raise errors.OpPrereqError: if the name is not valid

  """
  try:
    port = int(name)
  except (ValueError, TypeError):
    # Not a number: validate as a symbolic service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = 0 <= port < (1 << 16)

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name
|
156 |
|
157 |
|
158 |
def ListVolumeGroups():
  """List volume groups and their size

  Runs "vgs" and parses its output; a failed command or malformed lines
  result in an empty (or partial) dict rather than an exception.

  @rtype: dict
  @return:
    Dictionary with keys volume name and values
    the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    # Best-effort: callers get an empty dict if vgs is unavailable/fails
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      # Sizes are reported with decimals; truncate to whole mebibytes
      size = int(float(size))
    # FIX: use the "except ... as err" form (valid on Python 2.6+ and 3.x)
    # instead of the Python-2-only comma syntax
    except (IndexError, ValueError) as err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval
|
184 |
|
185 |
|
186 |
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge device exposes a "bridge" directory under sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
196 |
|
197 |
|
198 |
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    return val
|
218 |
|
219 |
|
220 |
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: on malformed range definitions or non-numeric
      boundaries

  """
  if not cpu_mask:
    return []
  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    # FIX: use "except ... as err" (Python 2.6+/3.x) instead of the
    # Python-2-only comma syntax
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      # For a single ID (no hyphen), boundaries[-1] == boundaries[0]
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list
|
257 |
|
258 |
|
259 |
def ParseMultiCpuMask(cpu_mask):
  """Parse a multiple CPU mask definition and return the list of CPU IDs.

  CPU mask format: colon-separated list of comma-separated list of CPU IDs
  or dash-separated ID ranges, with optional "all" as CPU value
  Example: "0-2,5:all:1,5,6:2" -> [ [ 0,1,2,5 ], [ -1 ], [ 1, 5, 6 ], [ 2 ] ]

  @type cpu_mask: str
  @param cpu_mask: multiple CPU mask definition
  @rtype: list of lists of int
  @return: list of lists of CPU IDs

  """
  if not cpu_mask:
    return []

  def _ParseOne(range_def):
    # "all" is represented by its special marker value
    if range_def == constants.CPU_PINNING_ALL:
      return [constants.CPU_PINNING_ALL_VAL, ]
    # Uniquify and sort the list before adding
    return sorted(set(ParseCpuMask(range_def)))

  return [_ParseOne(rd) for rd in cpu_mask.split(constants.CPU_PINNING_SEP)]
|
283 |
|
284 |
|
285 |
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  # Pick the right pwd lookup based on the argument's type; KeyError can
  # only originate from the lookup itself
  if isinstance(user, basestring):
    lookup = pwd.getpwnam
  elif isinstance(user, (int, long)):
    lookup = pwd.getpwuid
  else:
    raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                 type(user))
  try:
    entry = lookup(user)
  except KeyError:
    return default
  return entry.pw_dir
|
304 |
|
305 |
|
306 |
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  # Walk the sequence alongside the expected value; the first gap is the
  # first free index
  expected = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > expected:
      # "expected" is not used by any element
      return expected
    expected += 1
  return None
332 |
|
333 |
|
334 |
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  # Error/hangup conditions are always reported, even if not requested
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  # FIX: "except ... as err" (Python 2.6+/3.x) instead of the Python-2-only
  # comma syntax; err.args[0] instead of err[0], which is equivalent on
  # Python 2 and also works on Python 3
  except select.error as err:
    if err.args[0] != errno.EINTR:
      raise
    # Interrupted by a signal: report as "no event"
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None
371 |
|
372 |
|
373 |
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    # Remaining time, updated by the retry machinery via UpdateTimeout
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Polls once; raises L{RetryAgain} if nothing happened yet."""
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Records the remaining time before the next poll."""
    self.timeout = timeout
|
394 |
|
395 |
|
396 |
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is None:
    # No deadline: keep issuing single waits until a condition shows up
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
    return result

  # With a deadline, use the retry helper so interruptions (EINTR) do not
  # shorten the overall waiting time
  retrywaiter = FdConditionWaiterHelper(timeout)
  try:
    return Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
  except RetryTimeout:
    return None
|
423 |
|
424 |
|
425 |
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: string
  @param name: daemon name
  @rtype: bool
  @return: True if the daemon is running (or was started), False otherwise

  """
  cmd_result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not cmd_result.failed:
    return True
  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, cmd_result.fail_reason, cmd_result.output)
  return False
436 |
|
437 |
|
438 |
def StopDaemon(name):
  """Stop daemon

  @type name: string
  @param name: daemon name
  @rtype: bool
  @return: True if the stop command succeeded, False otherwise

  """
  cmd_result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not cmd_result.failed:
    return True
  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, cmd_result.fail_reason, cmd_result.output)
  return False
449 |
|
450 |
|
451 |
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  # Guard clauses: missing group first, then size check
  vgsize = vglist.get(vgname)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
474 |
|
475 |
|
476 |
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # Work in whole microseconds to avoid float modulo issues
  total_micro = int(value * 1000000)
  (seconds, microseconds) = divmod(total_micro, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
492 |
|
493 |
|
494 |
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usec) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usec <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usec

  # Inverse of SplitTime
  return float(secs) + (float(usec) * 0.000001)
510 |
|
511 |
|
512 |
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  # Exact key lookup wins over any regex keys
  if name in data:
    return (data[name], [])

  for (key, value) in data.items():
    # Regex keys are recognized by duck-typing on their "match" attribute
    matcher = getattr(key, "match", None)
    if matcher is not None:
      m = matcher(name)
      if m:
        return (value, list(m.groups()))

  return None
537 |
|
538 |
|
539 |
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  entries = []
  for mount_line in ReadFile(filename).splitlines():
    # Only the first four whitespace-separated fields are of interest
    (device, mountpoint, fstype, options, _) = mount_line.split(None, 4)
    entries.append((device, mountpoint, fstype, options))

  return entries
|
557 |
|
558 |
|
559 |
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert "signal_handlers" not in kwargs or \
             kwargs["signal_handlers"] is None or \
             isinstance(kwargs["signal_handlers"], dict), \
             "Wrong signal_handlers parameter in original function call"
      # Reuse the dict from an outer (stacked) decorator if one was passed,
      # otherwise create a fresh one and hand it to the wrapped function
      if "signal_handlers" in kwargs and kwargs["signal_handlers"] is not None:
        signal_handlers = kwargs["signal_handlers"]
      else:
        signal_handlers = {}
        kwargs["signal_handlers"] = signal_handlers
      # Installing SignalHandler replaces the previous handlers; they are
      # restored by Reset() in the finally clause below
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
|
594 |
|
595 |
|
596 |
def TimeoutExpired(epoch, timeout, _time_fn=time.time):
  """Checks whether a timeout has expired.

  @param epoch: Starting time (as returned by C{_time_fn})
  @param timeout: Timeout in seconds, relative to I{epoch}
  @param _time_fn: Time function (only for unittests)
  @rtype: bool
  @return: Whether the deadline has passed

  """
  deadline = epoch + timeout
  return _time_fn() > deadline
|
601 |
|
602 |
|
603 |
class SignalWakeupFd(object):
  """Wrapper for a signal wakeup pipe.

  Creates a pipe and registers its write end via C{signal.set_wakeup_fd},
  so that a byte becomes readable on this object whenever a signal
  arrives (on interpreters supporting C{set_wakeup_fd}); L{Notify} can
  also write to it explicitly.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Keep the previously registered wakeup fd so Reset() can restore it
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # hasattr guards against __del__ running on a partially-initialized
    # instance (e.g. if __init__ raised before setting _previous)
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
|
653 |
|
654 |
|
655 |
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: object whose C{Notify} method is called on each handled
        signal (e.g. a L{SignalWakeupFd} instance), or None

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # Maps signal number -> handler in place before we installed ours
    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # NOTE(review): deleting while iterating relies on Python 2's .items()
    # returning a list snapshot; on Python 3 this would need list(...)
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
|
740 |
|
741 |
|
742 |
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes the set.

    @param items: field names or regex source strings; each is anchored
        ("^...$") and compiled

    """
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # FIX: return the first successful match with a plain loop instead of
    # the Python-2-only itertools.ifilter; behavior is identical
    for item in self.items:
      m = item.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]