root / lib / utils / __init__.py @ c50645c0
History | View | Annotate | Download (52.7 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Ganeti utility module.
|
23 |
|
24 |
This module holds functions that can be used in both daemons (all) and
|
25 |
the command line scripts.
|
26 |
|
27 |
"""
|
28 |
|
29 |
|
30 |
import os |
31 |
import sys |
32 |
import time |
33 |
import subprocess |
34 |
import re |
35 |
import socket |
36 |
import tempfile |
37 |
import shutil |
38 |
import errno |
39 |
import pwd |
40 |
import itertools |
41 |
import select |
42 |
import fcntl |
43 |
import resource |
44 |
import logging |
45 |
import signal |
46 |
import datetime |
47 |
import calendar |
48 |
|
49 |
from cStringIO import StringIO |
50 |
|
51 |
from ganeti import errors |
52 |
from ganeti import constants |
53 |
from ganeti import compat |
54 |
|
55 |
from ganeti.utils.algo import * # pylint: disable-msg=W0401 |
56 |
from ganeti.utils.retry import * # pylint: disable-msg=W0401 |
57 |
from ganeti.utils.text import * # pylint: disable-msg=W0401 |
58 |
from ganeti.utils.mlock import * # pylint: disable-msg=W0401 |
59 |
from ganeti.utils.log import * # pylint: disable-msg=W0401 |
60 |
from ganeti.utils.hash import * # pylint: disable-msg=W0401 |
61 |
from ganeti.utils.wrapper import * # pylint: disable-msg=W0401 |
62 |
from ganeti.utils.filelock import * # pylint: disable-msg=W0401 |
63 |
from ganeti.utils.io import * # pylint: disable-msg=W0401 |
64 |
from ganeti.utils.x509 import * # pylint: disable-msg=W0401 |
65 |
|
66 |
|
67 |
#: when set to True, L{RunCmd} is disabled
_no_fork = False

#: procfs file yielding a fresh random UUID on each read (presumably used
#: by UUID helpers elsewhere in this module — not referenced in this chunk)
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

#: acceptable non-numeric service names, see L{ValidateServiceName}
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

#: matches a lowercase hexadecimal UUID in the usual 8-4-4-4-12 layout
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                     '[a-f0-9]{4}-[a-f0-9]{12}$')

#: how a command run via L{RunCmd} ended with respect to its timeout:
#: no timeout hit / SIGTERM sent after the timeout / SIGKILL sent after
#: the additional linger timeout (see L{_RunCmdPipe})
(_TIMEOUT_NONE,
 _TIMEOUT_TERM,
 _TIMEOUT_KILL) = range(3)

#: Shell param checker regexp
_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
83 |
|
84 |
|
85 |
def DisableFork():
  """Forbid any further use of fork(2) in this process.

  Once called, L{RunCmd} and L{StartDaemon} refuse to run and raise
  L{errors.ProgrammerError} instead.

  """
  global _no_fork # pylint: disable-msg=W0603
  _no_fork = True
92 |
|
93 |
|
94 |
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason, or None if
      the program completed successfully

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
               timeout):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    # Initialize explicitly: slotted attributes have no default value, so
    # without this, reading C{fail_reason} on a successful result would
    # raise AttributeError
    self.fail_reason = None

    fail_msgs = []
    if self.signal is not None:
      fail_msgs.append("terminated by signal %s" % self.signal)
    elif self.exit_code is not None:
      fail_msgs.append("exited with exit code %s" % self.exit_code)
    else:
      fail_msgs.append("unable to determine termination reason")

    if timeout_action == _TIMEOUT_TERM:
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
    elif timeout_action == _TIMEOUT_KILL:
      fail_msgs.append(("force termination after timeout of %.2f seconds"
                        " and linger for another %.2f seconds") %
                       (timeout, constants.CHILD_LINGER_TIMEOUT))

    if fail_msgs and self.failed:
      self.fail_reason = CommaJoin(fail_msgs)

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
155 |
|
156 |
|
157 |
def _BuildCmdEnvironment(env, reset): |
158 |
"""Builds the environment for an external program.
|
159 |
|
160 |
"""
|
161 |
if reset:
|
162 |
cmd_env = {} |
163 |
else:
|
164 |
cmd_env = os.environ.copy() |
165 |
cmd_env["LC_ALL"] = "C" |
166 |
|
167 |
if env is not None: |
168 |
cmd_env.update(env) |
169 |
|
170 |
return cmd_env
|
171 |
|
172 |
|
173 |
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False, |
174 |
interactive=False, timeout=None): |
175 |
"""Execute a (shell) command.
|
176 |
|
177 |
The command should not read from its standard input, as it will be
|
178 |
closed.
|
179 |
|
180 |
@type cmd: string or list
|
181 |
@param cmd: Command to run
|
182 |
@type env: dict
|
183 |
@param env: Additional environment variables
|
184 |
@type output: str
|
185 |
@param output: if desired, the output of the command can be
|
186 |
saved in a file instead of the RunResult instance; this
|
187 |
parameter denotes the file name (if not None)
|
188 |
@type cwd: string
|
189 |
@param cwd: if specified, will be used as the working
|
190 |
directory for the command; the default will be /
|
191 |
@type reset_env: boolean
|
192 |
@param reset_env: whether to reset or keep the default os environment
|
193 |
@type interactive: boolean
|
194 |
@param interactive: weather we pipe stdin, stdout and stderr
|
195 |
(default behaviour) or run the command interactive
|
196 |
@type timeout: int
|
197 |
@param timeout: If not None, timeout in seconds until child process gets
|
198 |
killed
|
199 |
@rtype: L{RunResult}
|
200 |
@return: RunResult instance
|
201 |
@raise errors.ProgrammerError: if we call this when forks are disabled
|
202 |
|
203 |
"""
|
204 |
if _no_fork:
|
205 |
raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled") |
206 |
|
207 |
if output and interactive: |
208 |
raise errors.ProgrammerError("Parameters 'output' and 'interactive' can" |
209 |
" not be provided at the same time")
|
210 |
|
211 |
if isinstance(cmd, basestring): |
212 |
strcmd = cmd |
213 |
shell = True
|
214 |
else:
|
215 |
cmd = [str(val) for val in cmd] |
216 |
strcmd = ShellQuoteArgs(cmd) |
217 |
shell = False
|
218 |
|
219 |
if output:
|
220 |
logging.debug("RunCmd %s, output file '%s'", strcmd, output)
|
221 |
else:
|
222 |
logging.debug("RunCmd %s", strcmd)
|
223 |
|
224 |
cmd_env = _BuildCmdEnvironment(env, reset_env) |
225 |
|
226 |
try:
|
227 |
if output is None: |
228 |
out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd, |
229 |
interactive, timeout) |
230 |
else:
|
231 |
timeout_action = _TIMEOUT_NONE |
232 |
status = _RunCmdFile(cmd, cmd_env, shell, output, cwd) |
233 |
out = err = ""
|
234 |
except OSError, err: |
235 |
if err.errno == errno.ENOENT:
|
236 |
raise errors.OpExecError("Can't execute '%s': not found (%s)" % |
237 |
(strcmd, err)) |
238 |
else:
|
239 |
raise
|
240 |
|
241 |
if status >= 0: |
242 |
exitcode = status |
243 |
signal_ = None
|
244 |
else:
|
245 |
exitcode = None
|
246 |
signal_ = -status |
247 |
|
248 |
return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
|
249 |
|
250 |
|
251 |
def SetupDaemonEnv(cwd="/", umask=077):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @param cwd: the directory to which to chdir
  @param umask: the umask to setup

  """
  os.chdir(cwd)
  os.umask(umask)
  # Become a session leader, detaching from the controlling terminal; this
  # only works in the intermediate (first-fork) child, hence the note above
  os.setsid()
264 |
|
265 |
|
266 |
def SetupDaemonFDs(output_file, output_fd): |
267 |
"""Setups up a daemon's file descriptors.
|
268 |
|
269 |
@param output_file: if not None, the file to which to redirect
|
270 |
stdout/stderr
|
271 |
@param output_fd: if not None, the file descriptor for stdout/stderr
|
272 |
|
273 |
"""
|
274 |
# check that at most one is defined
|
275 |
assert [output_file, output_fd].count(None) >= 1 |
276 |
|
277 |
# Open /dev/null (read-only, only for stdin)
|
278 |
devnull_fd = os.open(os.devnull, os.O_RDONLY) |
279 |
|
280 |
if output_fd is not None: |
281 |
pass
|
282 |
elif output_file is not None: |
283 |
# Open output file
|
284 |
try:
|
285 |
output_fd = os.open(output_file, |
286 |
os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
|
287 |
except EnvironmentError, err: |
288 |
raise Exception("Opening output file failed: %s" % err) |
289 |
else:
|
290 |
output_fd = os.open(os.devnull, os.O_WRONLY) |
291 |
|
292 |
# Redirect standard I/O
|
293 |
os.dup2(devnull_fd, 0)
|
294 |
os.dup2(output_fd, 1)
|
295 |
os.dup2(output_fd, 2)
|
296 |
|
297 |
|
298 |
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if _no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # The XOR is false exactly when both 'output' and 'output_fd' are given;
  # the leading "output and" lets the "neither given" case through
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  # A string command is run through the shell
  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          CloseFdNoError(errpipe_write)

        # Wait for daemon to be started (or an error message to
        # arrive) and read up to 100 KB as an error message; an empty
        # read means the write end was closed without an error report
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        CloseFdNoError(errpipe_read)
    finally:
      CloseFdNoError(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    CloseFdNoError(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
390 |
|
391 |
|
392 |
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Runs in the first-fork child: detaches via L{SetupDaemonEnv}, forks a
  second time and the grandchild writes its PID to C{pidpipe_write} and
  exec()s the daemon program. Failures are reported through
  C{errpipe_write}. This function never returns.

  """
  try:
    # Close parent's side
    CloseFdNoError(errpipe_read)
    CloseFdNoError(pidpipe_read)

    # First child process
    SetupDaemonEnv()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      fd_pidfile = WritePidFile(pidfile)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      # The daemon keeps the pidfile descriptor across the exec below
      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    SetupDaemonFDs(output, fd_output)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

    os._exit(1) # pylint: disable-msg=W0212
455 |
|
456 |
|
457 |
def WriteErrorToFD(fd, err):
  """Write an error message to a file descriptor, if one was given.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is None:
    # Nowhere to report to
    return

  message = err or "<unknown error>"

  RetryOnSignal(os.write, fd, message)
472 |
|
473 |
|
474 |
def _CheckIfAlive(child): |
475 |
"""Raises L{RetryAgain} if child is still alive.
|
476 |
|
477 |
@raises RetryAgain: If child is still alive
|
478 |
|
479 |
"""
|
480 |
if child.poll() is None: |
481 |
raise RetryAgain()
|
482 |
|
483 |
|
484 |
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  @param child: process object with a C{poll} method (e.g.
      C{subprocess.Popen})
  @param timeout: maximum number of seconds to wait; expiry is not an
      error, the function simply returns

  """
  deadline = max(0, timeout)
  try:
    # Poll with growing delays (base 1.0s, factor 1.2, capped at 5.0s)
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), deadline, args=[child])
  except RetryTimeout:
    # Timing out is expected; the caller decides what happens next
    pass
|
492 |
|
493 |
|
494 |
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after the program gets terminated
  @type _linger_timeout: int
  @param _linger_timeout: additional grace period after SIGTERM before the
      child is killed with SIGKILL (exposed for tests)
  @rtype: tuple
  @return: (out, err, status, timeout_action)

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  if interactive:
    # Interactive mode inherits our own stdin/stdout/stderr
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()

  linger_timeout = None

  if timeout is None:
    poll_timeout = None
  else:
    # Callable returning the remaining time, counting from now
    poll_timeout = RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))

  timeout_action = _TIMEOUT_NONE

  if not interactive:
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    # Map file descriptor -> (output buffer, pipe object)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    # Drain both pipes until EOF on each, honoring the timeout
    while fdmap:
      if poll_timeout:
        pt = poll_timeout() * 1000
        if pt < 0:
          # Execution timeout expired; send SIGTERM once and switch to
          # the linger timeout
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
            linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
          pt = linger_timeout() * 1000
          if pt < 0:
            break
      else:
        pt = None

      pollresult = RetryOnSignal(poller.poll, pt)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status, timeout_action
|
619 |
|
620 |
|
621 |
def _RunCmdFile(cmd, env, via_shell, output, cwd): |
622 |
"""Run a command and save its output to a file.
|
623 |
|
624 |
@type cmd: string or list
|
625 |
@param cmd: Command to run
|
626 |
@type env: dict
|
627 |
@param env: The environment to use
|
628 |
@type via_shell: bool
|
629 |
@param via_shell: if we should run via the shell
|
630 |
@type output: str
|
631 |
@param output: the filename in which to save the output
|
632 |
@type cwd: string
|
633 |
@param cwd: the working directory for the program
|
634 |
@rtype: int
|
635 |
@return: the exit status
|
636 |
|
637 |
"""
|
638 |
fh = open(output, "a") |
639 |
try:
|
640 |
child = subprocess.Popen(cmd, shell=via_shell, |
641 |
stderr=subprocess.STDOUT, |
642 |
stdout=fh, |
643 |
stdin=subprocess.PIPE, |
644 |
close_fds=True, env=env,
|
645 |
cwd=cwd) |
646 |
|
647 |
child.stdin.close() |
648 |
status = child.wait() |
649 |
finally:
|
650 |
fh.close() |
651 |
return status
|
652 |
|
653 |
|
654 |
def RunParts(dir_name, env=None, reset_env=False): |
655 |
"""Run Scripts or programs in a directory
|
656 |
|
657 |
@type dir_name: string
|
658 |
@param dir_name: absolute path to a directory
|
659 |
@type env: dict
|
660 |
@param env: The environment to use
|
661 |
@type reset_env: boolean
|
662 |
@param reset_env: whether to reset or keep the default os environment
|
663 |
@rtype: list of tuples
|
664 |
@return: list of (name, (one of RUNDIR_STATUS), RunResult)
|
665 |
|
666 |
"""
|
667 |
rr = [] |
668 |
|
669 |
try:
|
670 |
dir_contents = ListVisibleFiles(dir_name) |
671 |
except OSError, err: |
672 |
logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
|
673 |
return rr
|
674 |
|
675 |
for relname in sorted(dir_contents): |
676 |
fname = PathJoin(dir_name, relname) |
677 |
if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and |
678 |
constants.EXT_PLUGIN_MASK.match(relname) is not None): |
679 |
rr.append((relname, constants.RUNPARTS_SKIP, None))
|
680 |
else:
|
681 |
try:
|
682 |
result = RunCmd([fname], env=env, reset_env=reset_env) |
683 |
except Exception, err: # pylint: disable-msg=W0703 |
684 |
rr.append((relname, constants.RUNPARTS_ERR, str(err)))
|
685 |
else:
|
686 |
rr.append((relname, constants.RUNPARTS_RUN, result)) |
687 |
|
688 |
return rr
|
689 |
|
690 |
|
691 |
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if not (hasattr(tempfile, "_once_lock") and
          hasattr(tempfile, "_name_sequence")):
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Drop the cached generator; tempfile lazily creates a freshly seeded
    # one on next use
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
|
712 |
|
713 |
|
714 |
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  The dictionary is modified in place: values are converted to the
  requested types where possible, otherwise
  L{errors.TypeEnforcementError} is raised.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass type enforcement entirely
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # "maybe string" additionally accepts None
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # A false boolean is coerced to the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      # Non-empty strings are compared against the canonical true/false
      # value strings; everything else uses plain truthiness
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
|
781 |
|
782 |
|
783 |
def _GetProcStatusPath(pid): |
784 |
"""Returns the path for a PID's proc status file.
|
785 |
|
786 |
@type pid: int
|
787 |
@param pid: Process ID
|
788 |
@rtype: string
|
789 |
|
790 |
"""
|
791 |
return "/proc/%d/status" % pid |
792 |
|
793 |
|
794 |
def IsProcessAlive(pid): |
795 |
"""Check if a given pid exists on the system.
|
796 |
|
797 |
@note: zombie status is not handled, so zombie processes
|
798 |
will be returned as alive
|
799 |
@type pid: int
|
800 |
@param pid: the process ID to check
|
801 |
@rtype: boolean
|
802 |
@return: True if the process exists
|
803 |
|
804 |
"""
|
805 |
def _TryStat(name): |
806 |
try:
|
807 |
os.stat(name) |
808 |
return True |
809 |
except EnvironmentError, err: |
810 |
if err.errno in (errno.ENOENT, errno.ENOTDIR): |
811 |
return False |
812 |
elif err.errno == errno.EINVAL:
|
813 |
raise RetryAgain(err)
|
814 |
raise
|
815 |
|
816 |
assert isinstance(pid, int), "pid must be an integer" |
817 |
if pid <= 0: |
818 |
return False |
819 |
|
820 |
# /proc in a multiprocessor environment can have strange behaviors.
|
821 |
# Retry the os.stat a few times until we get a good result.
|
822 |
try:
|
823 |
return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5, |
824 |
args=[_GetProcStatusPath(pid)]) |
825 |
except RetryTimeout, err:
|
826 |
err.RaiseInner() |
827 |
|
828 |
|
829 |
def _ParseSigsetT(sigset): |
830 |
"""Parse a rendered sigset_t value.
|
831 |
|
832 |
This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
|
833 |
function.
|
834 |
|
835 |
@type sigset: string
|
836 |
@param sigset: Rendered signal set from /proc/$pid/status
|
837 |
@rtype: set
|
838 |
@return: Set of all enabled signal numbers
|
839 |
|
840 |
"""
|
841 |
result = set()
|
842 |
|
843 |
signum = 0
|
844 |
for ch in reversed(sigset): |
845 |
chv = int(ch, 16) |
846 |
|
847 |
# The following could be done in a loop, but it's easier to read and
|
848 |
# understand in the unrolled form
|
849 |
if chv & 1: |
850 |
result.add(signum + 1)
|
851 |
if chv & 2: |
852 |
result.add(signum + 2)
|
853 |
if chv & 4: |
854 |
result.add(signum + 3)
|
855 |
if chv & 8: |
856 |
result.add(signum + 4)
|
857 |
|
858 |
signum += 4
|
859 |
|
860 |
return result
|
861 |
|
862 |
|
863 |
def _GetProcStatusField(pstatus, field): |
864 |
"""Retrieves a field from the contents of a proc status file.
|
865 |
|
866 |
@type pstatus: string
|
867 |
@param pstatus: Contents of /proc/$pid/status
|
868 |
@type field: string
|
869 |
@param field: Name of field whose value should be returned
|
870 |
@rtype: string
|
871 |
|
872 |
"""
|
873 |
for line in pstatus.splitlines(): |
874 |
parts = line.split(":", 1) |
875 |
|
876 |
if len(parts) < 2 or parts[0] != field: |
877 |
continue
|
878 |
|
879 |
return parts[1].strip() |
880 |
|
881 |
return None |
882 |
|
883 |
|
884 |
def IsProcessHandlingSignal(pid, signum, status_path=None): |
885 |
"""Checks whether a process is handling a signal.
|
886 |
|
887 |
@type pid: int
|
888 |
@param pid: Process ID
|
889 |
@type signum: int
|
890 |
@param signum: Signal number
|
891 |
@rtype: bool
|
892 |
|
893 |
"""
|
894 |
if status_path is None: |
895 |
status_path = _GetProcStatusPath(pid) |
896 |
|
897 |
try:
|
898 |
proc_status = ReadFile(status_path) |
899 |
except EnvironmentError, err: |
900 |
# In at least one case, reading /proc/$pid/status failed with ESRCH.
|
901 |
if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH): |
902 |
return False |
903 |
raise
|
904 |
|
905 |
sigcgt = _GetProcStatusField(proc_status, "SigCgt")
|
906 |
if sigcgt is None: |
907 |
raise RuntimeError("%s is missing 'SigCgt' field" % status_path) |
908 |
|
909 |
# Now check whether signal is handled
|
910 |
return signum in _ParseSigsetT(sigcgt) |
911 |
|
912 |
|
913 |
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @return: the unmodified C{name}
  @raise errors.OpPrereqError: if the name is neither a valid port
      number nor an acceptable service name

  """
  try:
    port = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name, check against the allowed pattern
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = 0 <= port < (1 << 16)

  if valid:
    return name

  raise errors.OpPrereqError("Invalid service name '%s'" % name,
                             errors.ECODE_INVAL)
|
935 |
|
936 |
|
937 |
def ListVolumeGroups(): |
938 |
"""List volume groups and their size
|
939 |
|
940 |
@rtype: dict
|
941 |
@return:
|
942 |
Dictionary with keys volume name and values
|
943 |
the size of the volume
|
944 |
|
945 |
"""
|
946 |
command = "vgs --noheadings --units m --nosuffix -o name,size"
|
947 |
result = RunCmd(command) |
948 |
retval = {} |
949 |
if result.failed:
|
950 |
return retval
|
951 |
|
952 |
for line in result.stdout.splitlines(): |
953 |
try:
|
954 |
name, size = line.split() |
955 |
size = int(float(size)) |
956 |
except (IndexError, ValueError), err: |
957 |
logging.error("Invalid output from vgs (%s): %s", err, line)
|
958 |
continue
|
959 |
|
960 |
retval[name] = size |
961 |
|
962 |
return retval
|
963 |
|
964 |
|
965 |
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge device exposes a "bridge" directory in sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
975 |
|
976 |
|
977 |
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    return val
|
997 |
|
998 |
|
999 |
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  match = _SHELLPARAM_REGEX.match(word)
  return match is not None
1017 |
|
1018 |
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument contains unsafe characters

  """
  unsafe = [word for word in args if not IsValidShellParam(word)]
  if unsafe:
    # Report the first offending argument, as a plain loop would
    raise errors.ProgrammerError("Shell argument '%s' contains"
                                 " invalid characters" % unsafe[0])
  return template % args
|
1039 |
|
1040 |
def ParseCpuMask(cpu_mask): |
1041 |
"""Parse a CPU mask definition and return the list of CPU IDs.
|
1042 |
|
1043 |
CPU mask format: comma-separated list of CPU IDs
|
1044 |
or dash-separated ID ranges
|
1045 |
Example: "0-2,5" -> "0,1,2,5"
|
1046 |
|
1047 |
@type cpu_mask: str
|
1048 |
@param cpu_mask: CPU mask definition
|
1049 |
@rtype: list of int
|
1050 |
@return: list of CPU IDs
|
1051 |
|
1052 |
"""
|
1053 |
if not cpu_mask: |
1054 |
return []
|
1055 |
cpu_list = [] |
1056 |
for range_def in cpu_mask.split(","): |
1057 |
boundaries = range_def.split("-")
|
1058 |
n_elements = len(boundaries)
|
1059 |
if n_elements > 2: |
1060 |
raise errors.ParseError("Invalid CPU ID range definition" |
1061 |
" (only one hyphen allowed): %s" % range_def)
|
1062 |
try:
|
1063 |
lower = int(boundaries[0]) |
1064 |
except (ValueError, TypeError), err: |
1065 |
raise errors.ParseError("Invalid CPU ID value for lower boundary of" |
1066 |
" CPU ID range: %s" % str(err)) |
1067 |
try:
|
1068 |
higher = int(boundaries[-1]) |
1069 |
except (ValueError, TypeError), err: |
1070 |
raise errors.ParseError("Invalid CPU ID value for higher boundary of" |
1071 |
" CPU ID range: %s" % str(err)) |
1072 |
if lower > higher:
|
1073 |
raise errors.ParseError("Invalid CPU ID range definition" |
1074 |
" (%d > %d): %s" % (lower, higher, range_def))
|
1075 |
cpu_list.extend(range(lower, higher + 1)) |
1076 |
return cpu_list
|
1077 |
|
1078 |
|
1079 |
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  Any existing non-comment entry whose first field equals the given IP
  address is dropped, and a single fresh line mapping the IP to the
  hostname (plus aliases) is appended at the end of the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique; prepending the hostname and slicing it off
  # again also drops any alias identical to the hostname itself
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      # Copy every existing line except entries for the IP being (re)set;
      # splitlines(True) keeps the line terminators so the copy is verbatim
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          continue
        out.write(line)

      # Append the replacement entry: "<ip>\t<hostname>[ <aliases...>]"
      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  # NOTE(review): _WriteEtcHosts receives the descriptor of the destination
  # opened by WriteFile -- presumably a temp file renamed into place; confirm
  # against WriteFile's implementation
  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
|
1116 |
|
1117 |
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  Adds the host under both its full name and its short (first-label)
  name as an alias.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
1129 |
|
1130 |
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        # Only look at non-comment lines that carry at least one name
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            # Strip every occurrence of the hostname from the name list
            while hostname in names:
              names.remove(hostname)
            if names:
              # Other names remain: rewrite the entry without the hostname
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            # Either way, the original line must not be copied: if no
            # names remain, the address-only line is dropped entirely
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
|
1165 |
|
1166 |
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and shot name will be removed from
      L{constants.ETC_HOSTS}

  """
  short_name = hostname.split(".")[0]
  # Remove both the fully-qualified and the short name
  for name in (hostname, short_name):
    RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
1178 |
|
1179 |
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @raise errors.ProgrammerError: if C{user} is neither a string nor an
      integer

  """
  # Select the lookup function based on the argument type
  if isinstance(user, basestring):
    lookup_fn = pwd.getpwnam
  elif isinstance(user, (int, long)):
    lookup_fn = pwd.getpwuid
  else:
    raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                 type(user))
  try:
    entry = lookup_fn(user)
  except KeyError:
    return default
  return entry.pw_dir
|
1199 |
|
1200 |
def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
    filesystem.
  @rtype: str

  """
  # The kernel terminates the UUID with a newline; strip it
  uuid = ReadFile(_RANDOM_UUID_FILE, size=128)
  return uuid.rstrip("\n")
1210 |
|
1211 |
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  # Walk the sequence with a running counter of the value we expect next;
  # the first element exceeding it marks a gap
  expected = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > expected:
      return expected
    expected += 1
  # No gap found
  return None
|
1238 |
|
1239 |
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # Besides the requested events, also accept urgent data and the
  # error/hangup/invalid conditions, which poll() can report even if
  # not explicitly registered for
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    # Interrupted by a signal: treat as if nothing happened (caller may retry)
    io_events = []
  if io_events and io_events[0][1] & check:
    # Return the reported event mask of the (single) registered descriptor
    return io_events[0][1]
  else:
    return None
|
1277 |
|
1278 |
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    # Remaining timeout, updated by the retry machinery via UpdateTimeout
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Poll once; signal the retry machinery if no event occurred."""
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Record the remaining timeout for the next poll attempt."""
    self.timeout = timeout
1300 |
|
1301 |
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is not None:
    # Use the Retry infrastructure so that a signal interruption only
    # consumes the remaining part of the timeout instead of restarting it
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    # No timeout: keep polling forever, restarting after each interruption
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result
1328 |
|
1329 |
|
1330 |
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed

  """
  # Default maximum for the number of available file descriptors; used
  # whenever the system cannot tell us the real limit.  Renamed from the
  # constant-style "MAXFD" since it is a plain local variable.
  fallback_maxfd = 1024
  if "SC_OPEN_MAX" in os.sysconf_names:
    try:
      fallback_maxfd = os.sysconf("SC_OPEN_MAX")
      if fallback_maxfd < 0:
        fallback_maxfd = 1024
    except OSError:
      fallback_maxfd = 1024

  # Prefer the hard RLIMIT_NOFILE limit of this process
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if maxfd == resource.RLIM_INFINITY:
    maxfd = fallback_maxfd

  # Convert once to a set: "fd in noclose_fds" on a list would be O(n)
  # for every candidate descriptor in the loop below
  if noclose_fds:
    keep_fds = frozenset(noclose_fds)
  else:
    keep_fds = frozenset()

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if fd in keep_fds:
      continue
    CloseFdNoError(fd)
|
1361 |
|
1362 |
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon, using the classic double-fork
  technique; the two intermediate processes exit, and only the final
  daemon process returns from this function.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the write end of the error-reporting pipe; the daemon can
      write a startup error message to it for the original process to
      print (note: the original process and first child never return,
      they call C{os._exit})

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork()  # Fork a second child.
    if (pid == 0):  # The second child.
      # This is the daemon: drop the read end, keep wpipe for reporting
      CloseFdNoError(rpipe)
    else:
      # exit() or _exit()? See below.
      os._exit(0)  # Exit parent (the first child) of the second child.
  else:
    # Original process: close the write end so EOF arrives when the
    # daemon closes (or exits and thereby releases) its copy of wpipe
    CloseFdNoError(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode)  # Exit parent of the first child.

  # Only the daemon (second child) reaches this point
  SetupDaemonFDs(logfile, None)
  return wpipe
1409 |
|
1410 |
|
1411 |
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: name of the daemon to check/start
  @rtype: boolean
  @return: True if the daemon is running (or was started), False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True
  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
|
1423 |
|
1424 |
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: name of the daemon to stop
  @rtype: boolean
  @return: True if the stop command succeeded, False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True
  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
|
1436 |
|
1437 |
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie
  @raise errors.ProgrammerError: if C{pid} is not strictly positive

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    # Only reap if the kill actually found the process and we own it
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        # Non-blocking reap, to avoid leaving a zombie behind
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  # First attempt with the requested signal
  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    # Succeeds (returns) once the process is gone or reaped; raising
    # RetryAgain tells the retry machinery to poll again
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      # E.g. not our child: keep polling via IsProcessAlive
      raise RetryAgain()

    if result_pid > 0:
      # Child was reaped, it is gone
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
|
1499 |
|
1500 |
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  size = vglist.get(vgname, None)
  if size is None:
    return "volume group '%s' missing" % vgname
  if size < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, size))
  # Volume group present and big enough
  return None
1523 |
|
1524 |
|
1525 |
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # Work in whole microseconds to avoid float rounding in the split
  total_usecs = int(value * 1000000)
  (seconds, microseconds) = divmod(total_usecs, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
|
1542 |
|
1543 |
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert secs >= 0, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  return float(secs) + float(usecs) * 0.000001
|
1560 |
|
1561 |
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)
  @return: None when nothing matches

  """
  # Exact key lookup first
  try:
    return (data[name], [])
  except KeyError:
    pass

  # Fall back to regular expression keys (detected via their match method)
  for key, value in data.items():
    if not hasattr(key, "match"):
      continue
    m = key.match(name)
    if m:
      return (value, list(m.groups()))

  return None
1586 |
|
1587 |
|
1588 |
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  mounts = []
  for mount_line in ReadFile(filename).splitlines():
    # The fifth field (dump/pass flags) is ignored
    (device, mountpoint, fstype, options, _) = mount_line.split(None, 4)
    mounts.append((device, mountpoint, fstype, options))

  return mounts
1606 |
|
1607 |
|
1608 |
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported. The child encodes the
  function's result as its exit code (0 or 1); any other exit code or a
  death by signal is reported as an error.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child exits abnormally or with an
      unexpected exit code

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
|
1654 |
|
1655 |
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        # Stacked decorators: reuse the dict created by an outer decorator
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        # Expose this level's handler for each of its signals
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous signal handlers, even on exceptions
        sighandler.Reset()
    return sig_function
  return wrap
1690 |
|
1691 |
|
1692 |
class SignalWakeupFd(object):
  """Wrapper around a signal wakeup pipe.

  Exposes the read end of a pipe whose write end is registered via
  C{signal.set_wakeup_fd}, so that pending signals make the descriptor
  readable (usable with select/poll).

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported: fall back to a no-op that mimics set_wakeup_fd's
    # "no previous fd" return value
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Remember the previously registered wakeup fd so it can be restored
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # The hasattr guard protects against __del__ running on a partially
    # initialized instance (e.g. if __init__ failed before setting it)
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
1742 |
|
1743 |
|
1744 |
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: if not None, an object whose Notify method is called
        whenever one of the handled signals arrives (presumably a
        L{SignalWakeupFd} -- confirm with callers)

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # Maps signal number -> handler that was installed before ours
    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # In Python 2, items() returns a list copy, so deleting entries while
    # looping over it is safe
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
1829 |
|
1830 |
|
1831 |
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Each field must match the whole string, hence the anchors
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Return the first successful match, if any
    for pattern in self.items:
      m = pattern.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [field for field in items if not self.Matches(field)]
|
1872 |
|
1873 |
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests
    @raise ValueError: if the timeout is negative

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn
    # Set lazily on the first call to Remaining
    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    """
    if self._timeout is None:
      return None

    # Get start time on first calculation
    if self._start_time is None:
      self._start_time = self._time_fn()

    # Calculate remaining time
    remaining = self._start_time + self._timeout - self._time_fn()

    if self._allow_negative:
      return remaining
    # Clamp so callers never see a negative timeout
    return max(0.0, remaining)