#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import sys
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import OpenSSL
import datetime
import calendar
import hmac
import collections
import struct
import IN

from cStringIO import StringIO

try:
  import ctypes
except ImportError:
  ctypes = None

from ganeti import errors
from ganeti import constants
from ganeti import compat


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
#
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
# pid_t to "int".
#
# IEEE Std 1003.1-2008:
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
_STRUCT_UCRED = "iII"
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2


class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]


  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")


def _BuildCmdEnvironment(env, reset):
  """Builds the environment for an external program.

  """
  if reset:
    cmd_env = {}
  else:
    cmd_env = os.environ.copy()
    cmd_env["LC_ALL"] = "C"

  if env is not None:
    cmd_env.update(env)

  return cmd_env


def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if isinstance(cmd, basestring):
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)

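
# Illustrative usage sketch added in editing (not part of the original
# module): RunCmd accepts either a list (executed directly) or a string
# (executed via the shell) and always returns a RunResult, so callers
# normally check the "failed" flag instead of catching exceptions:
#
#   result = RunCmd(["/bin/ls", "/etc"])
#   if result.failed:
#     logging.error("ls failed (%s): %s", result.fail_reason, result.output)

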
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to arrive) and
        # read up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))

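
# Illustrative sketch added in editing (the paths and daemon name below are
# invented for the example): StartDaemon is normally given a PID file so the
# daemon can be found again later, e.g.:
#
#   pid = StartDaemon(["/usr/sbin/mydaemon", "--foo"],
#                     output="/var/log/mydaemon.log",
#                     pidfile="/var/run/mydaemon.pid")
#
# The returned PID can later be cross-checked with ReadLockedPidFile or
# IsProcessAlive.

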
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    os.chdir("/")
    os.umask(077)
    os.setsid()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      try:
        # TODO: Atomic replace with another locked file instead of writing into
        # it after creating
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

        # Lock the PID file (and fail if not possible to do so). Any code
        # wanting to send a signal to the daemon should try to lock the PID
        # file before reading it. If acquiring the lock succeeds, the daemon is
        # no longer running and the signal should not be sent.
        LockFile(fd_pidfile)

        os.write(fd_pidfile, "%d\n" % os.getpid())
      except Exception, err:
        raise Exception("Creating and locking PID file failed: %s" % err)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    # Open /dev/null
    fd_devnull = os.open(os.devnull, os.O_RDWR)

    assert not output or (bool(output) ^ (fd_output is not None))

    if fd_output is not None:
      pass
    elif output:
      # Open output file
      try:
        # TODO: Implement flag to set append=yes/no
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
      except EnvironmentError, err:
        raise Exception("Opening output file failed: %s" % err)
    else:
      fd_output = fd_devnull

    # Redirect standard I/O
    os.dup2(fd_devnull, 0)
    os.dup2(fd_output, 1)
    os.dup2(fd_output, 2)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      buf = str(sys.exc_info()[1])

      RetryOnSignal(os.write, errpipe_write, buf)
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

    os._exit(1) # pylint: disable-msg=W0212

def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,
                           cwd=cwd)

  child.stdin.close()
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
  out = StringIO()
  err = StringIO()
  fdmap = {
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    }
  for fd in fdmap:
    SetNonblockFlag(fd, True)

  while fdmap:
    pollresult = RetryOnSignal(poller.poll)

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        if not data:
          poller.unregister(fd)
          del fdmap[fd]
          continue
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)
        del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status

def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: int
  @return: the exit status

  """
  fh = open(output, "a")
  try:
    child = subprocess.Popen(cmd, shell=via_shell,
                             stderr=subprocess.STDOUT,
                             stdout=fh,
                             stdin=subprocess.PIPE,
                             close_fds=True, env=env,
                             cwd=cwd)

    child.stdin.close()
    status = child.wait()
  finally:
    fh.close()
  return status


def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)


def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)


def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError, err:
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error), err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise

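
# Illustrative sketch added in editing: RetryOnSignal simply re-invokes the
# callable for as long as it fails with EINTR, so blocking system calls can
# be wrapped directly ("fd" is a placeholder file descriptor):
#
#   data = RetryOnSignal(os.read, fd, 4096)
#
# Any error other than an interrupted system call is re-raised unchanged.

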
def RunParts(dir_name, env=None, reset_env=False):
  """Run scripts or programs in a directory.

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr


def GetSocketCredentials(sock):
  """Returns the credentials of the foreign process connected to a socket.

  @param sock: Unix socket
  @rtype: tuple; (number, number, number)
  @return: The PID, UID and GID of the connected foreign process.

  """
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
                             _STRUCT_UCRED_SIZE)
  return struct.unpack(_STRUCT_UCRED, peercred)

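
# Illustrative sketch added in editing: GetSocketCredentials unpacks the
# SO_PEERCRED "struct ucred" described near the top of this module, so for
# a connected AF_UNIX socket the peer's identity is available directly:
#
#   (pid, uid, gid) = GetSocketCredentials(conn)
#
# where "conn" stands for the socket object returned by accept() on a
# listening Unix socket.

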
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise


def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError, err:
    if err.errno != errno.ENOENT:
      raise


def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise


def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise


def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")


def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)

  fp = compat.sha1_hash()
  while True:
    data = f.read(4096)
    if not data:
      break

    fp.update(data)

  return fp.hexdigest()


def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filenames to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    if cksum:
      ret[filename] = cksum

  return ret


def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype == constants.VTYPE_STRING:
      if not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)

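
# Illustrative sketch added in editing (the key names are invented for the
# example): ForceDictType coerces the values of the target dictionary in
# place according to the declared types:
#
#   opts = {"size": "1G", "readonly": "false"}
#   ForceDictType(opts, {"size": constants.VTYPE_SIZE,
#                        "readonly": constants.VTYPE_BOOL})
#   # opts is now {"size": 1024, "readonly": False}

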
def _GetProcStatusPath(pid):
  """Returns the path for a PID's proc status file.

  @type pid: int
  @param pid: Process ID
  @rtype: string

  """
  return "/proc/%d/status" % pid


def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    err.RaiseInner()


def _ParseSigsetT(sigset):
  """Parse a rendered sigset_t value.

  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
  function.

  @type sigset: string
  @param sigset: Rendered signal set from /proc/$pid/status
  @rtype: set
  @return: Set of all enabled signal numbers

  """
  result = set()

  signum = 0
  for ch in reversed(sigset):
    chv = int(ch, 16)

    # The following could be done in a loop, but it's easier to read and
    # understand in the unrolled form
    if chv & 1:
      result.add(signum + 1)
    if chv & 2:
      result.add(signum + 2)
    if chv & 4:
      result.add(signum + 3)
    if chv & 8:
      result.add(signum + 4)

    signum += 4

  return result

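
# Worked example added in editing: the kernel renders the signal mask with
# the highest signals first, so a SigCgt value of "0000000000000002" has
# only bit 1 of its last hex digit set, which corresponds to signal number
# 2 (SIGINT); _ParseSigsetT() returns set([2]) for it.

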
def _GetProcStatusField(pstatus, field):
  """Retrieves a field from the contents of a proc status file.

  @type pstatus: string
  @param pstatus: Contents of /proc/$pid/status
  @type field: string
  @param field: Name of field whose value should be returned
  @rtype: string

  """
  for line in pstatus.splitlines():
    parts = line.split(":", 1)

    if len(parts) < 2 or parts[0] != field:
      continue

    return parts[1].strip()

  return None


def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @rtype: bool

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
      return False
    raise

  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  return None


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None

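
# Illustrative sketch added in editing: matching happens on dot-separated
# label boundaries, and ambiguity yields None:
#
#   MatchNameComponent("node1", ["node1.example.com", "node2.example.com"])
#       -> "node1.example.com"
#   MatchNameComponent("node", ["node1.example.com", "node2.example.com"])
#       -> None (no match on a label boundary)

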
class HostInfo:
  """Class implementing resolver and hostname functionality

  """
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")

  def __init__(self, name=None):
    """Initialize the host name object.

    If the name argument is not passed, it will use this system's
    name.

    """
    if name is None:
      name = self.SysName()

    self.query = name
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
    self.ip = self.ipaddrs[0]

  def ShortName(self):
    """Returns the hostname without domain.

    """
    return self.name.split('.')[0]

  @staticmethod
  def SysName():
    """Return the current system's name.

    This is simply a wrapper over C{socket.gethostname()}.

    """
    return socket.gethostname()

  @staticmethod
  def LookupHostname(hostname):
    """Look up hostname

    @type hostname: str
    @param hostname: hostname to look up

    @rtype: tuple
    @return: a tuple (name, aliases, ipaddrs) as returned by
        C{socket.gethostbyname_ex}
    @raise errors.ResolverError: in case of errors in resolving

    """
    try:
      result = socket.gethostbyname_ex(hostname)
    except (socket.gaierror, socket.herror, socket.error), err:
      # hostname not found in DNS, or other socket exception in the
      # (code, description format)
      raise errors.ResolverError(hostname, err.args[0], err.args[1])

    return result

  @classmethod
  def NormalizeName(cls, hostname):
    """Validate and normalize the given hostname.

    @attention: the validation is a bit more relaxed than the standards
        require; most importantly, we allow underscores in names
    @raise errors.OpPrereqError: when the name is not valid

    """
    hostname = hostname.lower()
    if (not cls._VALID_NAME_RE.match(hostname) or
        # double-dots, meaning empty label
        ".." in hostname or
        # empty initial label
        hostname.startswith(".")):
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
                                 errors.ECODE_INVAL)
    if hostname.endswith("."):
      hostname = hostname.rstrip(".")
    return hostname


def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = (numport >= 0 and numport < (1 << 16))

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name


def GetHostInfo(name=None):
  """Lookup host name and raise an OpPrereqError for failures"""

  try:
    return HostInfo(name)
  except errors.ResolverError, err:
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
                               (err[0], err[2]), errors.ECODE_RESOLVER)


def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
      Dictionary with keys volume name and values
      the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)

def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")
  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    rval = int(val)
    return rval

  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]

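
# Illustrative sketch added in editing: NiceSort compares the digit groups
# numerically rather than lexicographically, exactly as documented above:
#
#   NiceSort(["a1", "a10", "a11", "a2"]) -> ["a1", "a2", "a10", "a11"]

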
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv


def IsValidIP(ip):
  """Verifies the syntax of an IPv4 address.

  This function checks if the IPv4 address passed is valid or not based
  on syntax (not IP range, class calculations, etc.).

  @type ip: str
  @param ip: the address to be checked
  @rtype: a regular expression match object
  @return: a regular expression match object, or None if the
      address is not valid

  """
  unit = "(0|[1-9]\d{0,2})"
  #TODO: convert and return only boolean
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)


def IsValidShellParam(word):
  """Verifies if the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value

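
# Worked example added in editing: ParseUnit and FormatUnit are roughly
# inverse operations, both working in MiB:
#
#   ParseUnit("1.3G")     -> 1332  (1.3 * 1024 = 1331.2, rounded up to a
#                                   multiple of 4)
#   FormatUnit(1332, 'h') -> "1.3G"

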
def AddAuthorizedKey(file_name, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  f = open(file_name, 'a+')
  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if fields and not fields[0].startswith('#') and ip == fields[0]:
            continue
          out.write(line)

        out.write("%s\t%s" % (ip, hostname))
        if aliases:
          out.write(" %s" % ' '.join(aliases))
        out.write('\n')

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if len(fields) > 1 and not fields[0].startswith('#'):
            names = fields[1:]
            if hostname in names:
              while hostname in names:
                names.remove(hostname)
              if names:
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
              continue

          out.write(line)

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat
  them as separators.

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")


def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                 file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    return value
  else:
    return "'%s'" % value.replace("'", "'\\''")


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join([ShellQuote(i) for i in args])

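
# Illustrative sketch added in editing: ShellQuoteArgs is what RunCmd uses
# to log list-based commands; arguments needing quoting are wrapped in
# single quotes with embedded quotes escaped:
#
#   ShellQuoteArgs(["echo", "it's here"])  ->  echo 'it'\''s here'

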
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP or hostname to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored

  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error, (errcode, _):
      if errcode == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    success = False
  except socket.error, (errcode, _):
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)

  return success


def OwnIpAddress(address):
  """Check if the current host has the given IP address.

  Currently this is done by TCP-pinging the address from the loopback
  address.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
                 source=constants.LOCALHOST_IP_ADDRESS)

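
# Note added in editing: TcpPing treats "connection refused" as success
# unless live_port_needed is set, which is what makes the OwnIpAddress()
# check above work: connecting from 127.0.0.1 to an address held by this
# host either succeeds or is refused, while connecting to an address we do
# not own will typically time out.  For example (illustrative address):
#
#   OwnIpAddress("192.0.2.10")  ->  True only if a local interface owns it

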
def ListVisibleFiles(path): |
1746 |
"""Returns a list of visible files in a directory.
|
1747 |
|
1748 |
@type path: str
|
1749 |
@param path: the directory to enumerate
|
1750 |
@rtype: list
|
1751 |
@return: the list of all files not starting with a dot
|
1752 |
@raise ProgrammerError: if L{path} is not an absolue and normalized path
|
1753 |
|
1754 |
"""
|
1755 |
if not IsNormAbsPath(path): |
1756 |
raise errors.ProgrammerError("Path passed to ListVisibleFiles is not" |
1757 |
" absolute/normalized: '%s'" % path)
|
1758 |
files = [i for i in os.listdir(path) if not i.startswith(".")] |
1759 |
return files
|
1760 |
|
1761 |
|
1762 |
def GetHomeDir(user, default=None): |
1763 |
"""Try to get the homedir of the given user.
|
1764 |
|
1765 |
The user can be passed either as a string (denoting the name) or as
|
1766 |
an integer (denoting the user id). If the user is not found, the
|
1767 |
'default' argument is returned, which defaults to None.
|
1768 |
|
1769 |
"""
|
1770 |
try:
|
1771 |
if isinstance(user, basestring): |
1772 |
result = pwd.getpwnam(user) |
1773 |
elif isinstance(user, (int, long)): |
1774 |
result = pwd.getpwuid(user) |
1775 |
else:
|
1776 |
raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" % |
1777 |
type(user))
|
1778 |
except KeyError: |
1779 |
return default
|
1780 |
return result.pw_dir
|
1781 |
|
1782 |
|
1783 |
def NewUUID(): |
1784 |
"""Returns a random UUID.
|
1785 |
|
1786 |
@note: This is a Linux-specific method as it uses the /proc
|
1787 |
filesystem.
|
1788 |
@rtype: str
|
1789 |
|
1790 |
"""
|
1791 |
return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n") |
1792 |
|
1793 |
|
1794 |
def GenerateSecret(numbytes=20): |
1795 |
"""Generates a random secret.
|
1796 |
|
1797 |
This will generate a pseudo-random secret returning an hex string
|
1798 |
(so that it can be used where an ASCII string is needed).
|
1799 |
|
1800 |
@param numbytes: the number of bytes which will be represented by the returned
|
1801 |
string (defaulting to 20, the length of a SHA1 hash)
|
1802 |
@rtype: str
|
1803 |
@return: an hex representation of the pseudo-random sequence
|
1804 |
|
1805 |
"""
|
1806 |
return os.urandom(numbytes).encode('hex') |
1807 |
|
1808 |
|
1809 |
def EnsureDirs(dirs): |
1810 |
"""Make required directories, if they don't exist.
|
1811 |
|
1812 |
@param dirs: list of tuples (dir_name, dir_mode)
|
1813 |
@type dirs: list of (string, integer)
|
1814 |
|
1815 |
"""
|
1816 |
for dir_name, dir_mode in dirs: |
1817 |
try:
|
1818 |
os.mkdir(dir_name, dir_mode) |
1819 |
except EnvironmentError, err: |
1820 |
if err.errno != errno.EEXIST:
|
1821 |
raise errors.GenericError("Cannot create needed directory" |
1822 |
" '%s': %s" % (dir_name, err))
|
1823 |
try:
|
1824 |
os.chmod(dir_name, dir_mode) |
1825 |
except EnvironmentError, err: |
1826 |
raise errors.GenericError("Cannot change directory permissions on" |
1827 |
" '%s': %s" % (dir_name, err))
|
1828 |
if not os.path.isdir(dir_name): |
1829 |
raise errors.GenericError("%s is not a directory" % dir_name) |
1830 |
|
1831 |
|
1832 |
def ReadFile(file_name, size=-1): |
1833 |
"""Reads a file.
|
1834 |
|
1835 |
@type size: int
|
1836 |
@param size: Read at most size bytes (if negative, entire file)
|
1837 |
@rtype: str
|
1838 |
@return: the (possibly partial) content of the file
|
1839 |
|
1840 |
"""
|
1841 |
f = open(file_name, "r") |
1842 |
try:
|
1843 |
return f.read(size)
|
1844 |
finally:
|
1845 |
f.close() |
1846 |
|
1847 |
|
1848 |


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
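# Illustrative WriteFile usage sketch (not from the original sources; the
# paths below are hypothetical). Exactly one of 'data' or 'fn' must be given:
#
#   WriteFile("/tmp/example.conf", data="key = value\n", mode=0644)
#   WriteFile("/tmp/example.bin", fn=lambda fd: os.write(fd, os.urandom(16)))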


def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  """
  file_lines = ReadFile(file_name).splitlines()
  full_lines = filter(bool, file_lines)
  if not file_lines or not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]


def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used
      return idx + base
  return None
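# Illustrative FirstFree examples (not from the original sources):
#
#   FirstFree([0, 1, 3])          # -> 2, the first unused index
#   FirstFree([3, 4, 6], base=3)  # -> 5
#   FirstFree([0, 1, 2])          # -> None, no gap in the sequence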


def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None


class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout


def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result


def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  return [i for i in seq if i not in seen and not seen.add(i)]
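# Illustrative UniqueSequence example (not from the original sources). The
# "not seen.add(i)" trick works because set.add() always returns None:
#
#   UniqueSequence([3, 1, 3, 2, 1])  # -> [3, 1, 2], first occurrences kept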


def NormalizeAndValidateMac(mac):
  """Normalizes and checks whether a MAC address is valid.

  Checks whether the supplied MAC address is formally correct; only
  the colon-separated format is accepted. The address is normalized
  to all-lowercase.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
  if not mac_check.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  return mac.lower()


def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) for a negative value, (True, None) otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None


def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    if err.errno != errno.EBADF:
      if retries > 0:
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on


def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)


def Mlockall():
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires the ctypes module.

  """
  if ctypes is None:
    logging.warning("Cannot set memory lock, ctypes module not found")
    return

  libc = ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older versions of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")


def Daemonize(logfile, run_uid, run_gid):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @type run_uid: int
  @param run_uid: Run the child under this uid
  @type run_gid: int
  @param run_gid: Run the child under this gid
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit
  UMASK = 077
  WORKDIR = "/"

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    os.setsid()
    # FIXME: When removing again and moving to start-stop-daemon privilege drop
    # make sure to check for config permission and bail out when invoked
    # with wrong user.
    os.setgid(run_gid)
    os.setuid(run_uid)
    # this might fail
    pid = os.fork()  # Fork a second child.
    if (pid == 0):  # The second child.
      os.chdir(WORKDIR)
      os.umask(UMASK)
    else:
      # exit() or _exit()? See below.
      os._exit(0)  # Exit parent (the first child) of the second child.
  else:
    os._exit(0)  # Exit parent of the first child.

  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY)  # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600)  # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0


def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def StopDaemon(name):
  """Stop daemon

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if result.failed:
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pid = os.getpid()
  pidfilename = DaemonPidFileName(name)
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % pid)


def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except:  # pylint: disable-msg=W0702
    pass


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
      a SIGKILL will be sent. If not positive, no such checking
      will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)


def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)
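# Illustrative SplitTime/MergeTime round-trip (not from the original sources):
#
#   SplitTime(1218448917.4809151)    # -> (1218448917, 480915)
#   MergeTime((1218448917, 480915))  # -> 1218448917.480915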


def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port


class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler
  tries to log on stderr. This doesn't work in ganeti since stderr is
  redirected to the logfile. This class avoids failures by reporting
  errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise


def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result


def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp with the local timezone.

  """
  return time.strftime("%F %T %Z", time.gmtime(secs))


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  if expired:
    msg = "Certificate is expired"

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTimestampWithTZ(not_before),
               FormatTimestampWithTZ(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)

    return (CERT_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTimestampWithTZ(not_before))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  return (None, None)


def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 time.time(), warn_days, error_days)


def SignX509Certificate(cert, key, salt):
  """Sign an X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
           cert_pem))


def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      break

    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")


def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load certificate
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)


def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string

  """
  if salt:
    salted_text = salt + text
  else:
    salted_text = text

  return hmac.new(key, salted_text, compat.sha1).hexdigest()


def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @type digest: string
  @param digest: Expected digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
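# Illustrative Sha1Hmac/VerifySha1Hmac round-trip (not from the original
# sources; the key, message and salt values are made up):
#
#   digest = Sha1Hmac("secret-key", "payload", salt="abc123")
#   VerifySha1Hmac("secret-key", "payload", digest, salt="abc123")  # -> True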


def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu


def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to appear inside one of the
  elements. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
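# Illustrative UnescapeAndSplit examples (not from the original sources):
#
#   UnescapeAndSplit("a,b,c")     # -> ['a', 'b', 'c']
#   UnescapeAndSplit(r"a\,b,c")   # -> ['a,b', 'c'] (escaped separator)
#   UnescapeAndSplit(r"a\\,b,c")  # -> ['a\\', 'b', 'c'], i.e. the first
#                                 #    element keeps a single backslash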


def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)


def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @rtype: bool
  @return: Whether process was found

  """
  try:
    fn(*args, **kwargs)
  except EnvironmentError, err:
    # Ignore ESRCH
    if err.errno == errno.ESRCH:
      return False
    raise

  return True


def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  """
  try:
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
      return None
    else:
      raise
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
      return None
    else:
      raise


def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))


def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  parts = []

  secs = round(secs, 0)

  if secs > 0:
    # Negative values would be a bit tricky
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (complete, secs) = divmod(secs, one)
      if complete or parts:
        parts.append("%d%s" % (complete, unit))

  parts.append("%ds" % secs)

  return " ".join(parts)
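# Illustrative FormatSeconds examples (not from the original sources):
#
#   FormatSeconds(7)       # -> "7s"
#   FormatSeconds(3905)    # -> "1h 5m 5s"
#   FormatSeconds(208789)  # -> "2d 9h 59m 49s"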


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
      seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

  if value is not None:
    # Remove file if it's outdated
    if now > (value + remove_after):
      RemoveFile(filename)
      value = None

    elif now > value:
      value = None

  return value


class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which were passed by the retried function to RetryAgain will
  be preserved in RetryTimeout, if it is raised. If such an argument was an
  exception the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)


class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs,
  as arguments to RetryTimeout. If an exception is passed, the RaiseInner()
  method of the RetryTimeout exception can be used to reraise it.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run
    if self._limit is None or self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep, |
3360 |
_time_fn=time.time): |
3361 |
"""Call a function repeatedly until it succeeds.
|
3362 |
|
3363 |
The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
|
3364 |
anymore. Between calls a delay, specified by C{delay}, is inserted. After a
|
3365 |
total of C{timeout} seconds, this function throws L{RetryTimeout}.
|
3366 |
|
3367 |
C{delay} can be one of the following:
|
3368 |
- callable returning the delay length as a float
|
3369 |
- Tuple of (start, factor, limit)
|
3370 |
- L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
|
3371 |
useful when overriding L{wait_fn} to wait for an external event)
|
3372 |
- A static delay as a number (int or float)
|
3373 |
|
3374 |
@type fn: callable
|
3375 |
@param fn: Function to be called
|
3376 |
@param delay: Either a callable (returning the delay), a tuple of (start,
|
3377 |
factor, limit) (see L{_RetryDelayCalculator}),
|
3378 |
L{RETRY_REMAINING_TIME} or a number (int or float)
|
3379 |
@type timeout: float
|
3380 |
@param timeout: Total timeout
|
3381 |
@type wait_fn: callable
|
3382 |
@param wait_fn: Waiting function
|
3383 |
@return: Return value of function
|
3384 |
|
3385 |
"""
|
3386 |
assert callable(fn) |
3387 |
assert callable(wait_fn) |
3388 |
assert callable(_time_fn) |
3389 |
|
3390 |
if args is None: |
3391 |
args = [] |
3392 |
|
3393 |
end_time = _time_fn() + timeout |
3394 |
|
3395 |
if callable(delay): |
3396 |
# External function to calculate delay
|
3397 |
calc_delay = delay |
3398 |
|
3399 |
elif isinstance(delay, (tuple, list)): |
3400 |
# Increasing delay with optional upper boundary
|
3401 |
(start, factor, limit) = delay |
3402 |
calc_delay = _RetryDelayCalculator(start, factor, limit) |
3403 |
|
3404 |
elif delay is RETRY_REMAINING_TIME: |
3405 |
# Always use the remaining time
|
3406 |
calc_delay = None
|
3407 |
|
3408 |
else:
|
3409 |
# Static delay
|
3410 |
calc_delay = lambda: delay
|
3411 |
|
3412 |
assert calc_delay is None or callable(calc_delay) |
3413 |
|
3414 |
while True: |
3415 |
retry_args = [] |
3416 |
try:
|
3417 |
# pylint: disable-msg=W0142
|
3418 |
return fn(*args)
|
3419 |
except RetryAgain, err:
|
3420 |
retry_args = err.args |
3421 |
except RetryTimeout:
|
3422 |
raise errors.ProgrammerError("Nested retry loop detected that didn't" |
3423 |
" handle RetryTimeout")
|
3424 |
|
3425 |
remaining_time = end_time - _time_fn() |
3426 |
|
3427 |
if remaining_time < 0.0: |
3428 |
# pylint: disable-msg=W0142
|
3429 |
raise RetryTimeout(*retry_args)
|
3430 |
|
3431 |
assert remaining_time >= 0.0 |
3432 |
|
3433 |
if calc_delay is None: |
3434 |
wait_fn(remaining_time) |
3435 |
else:
|
3436 |
current_delay = calc_delay() |
3437 |
if current_delay > 0.0: |
3438 |
wait_fn(current_delay) |
3439 |
|
3440 |
|
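
# Usage sketch (illustrative): retry a check function until it stops raising
# RetryAgain, starting with a 0.1s delay that grows by a factor of 1.5 up to
# 2.0s, and giving up after 30 seconds overall. _CheckReady and _IsReady are
# hypothetical helpers used only for this example:
#
#   def _CheckReady():
#     if not _IsReady():
#       raise RetryAgain()
#     return True
#
#   try:
#     Retry(_CheckReady, (0.1, 1.5, 2.0), 30.0)
#   except RetryTimeout:
#     logging.error("Still not ready after 30 seconds")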


def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(fd)
  return path


def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
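
# Usage sketch (illustrative names and paths): generate a certificate valid
# for one year and store key and certificate separately:
#
#   (key_pem, cert_pem) = GenerateSelfSignedX509Cert("node1.example.com",
#                                                    365 * 24 * 60 * 60)
#   WriteFile("/tmp/example.key", mode=0400, data=key_pem)
#   WriteFile("/tmp/example.pem", mode=0444, data=cert_pem)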


def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
  """Legacy function to generate self-signed X509 certificate.

  """
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
                                                   validity * 24 * 60 * 60)

  WriteFile(filename, mode=0400, data=key_pem + cert_pem)


class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must not be negative"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
                     can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
                    (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
                     can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
                    (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
                     can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
                    (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
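
# Usage sketch (illustrative; the lock file path is hypothetical): serialize
# access to a resource between processes, waiting up to ten seconds for the
# exclusive lock:
#
#   lock = FileLock.Open("/var/lock/example.lock")
#   try:
#     lock.Exclusive(blocking=True, timeout=10)
#     # ... critical section ...
#     lock.Unlock()
#   finally:
#     lock.Close()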


class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
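
# Usage sketch (illustrative): feed arbitrary chunks and have complete lines
# delivered to a callback; any trailing partial line is delivered on close():
#
#   splitter = LineSplitter(logging.debug)
#   splitter.write("first li")
#   splitter.write("ne\nsecond line\npart")
#   splitter.flush()   # delivers "first line" and "second line"
#   splitter.close()   # delivers the trailing "part"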


def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
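
# Usage sketch (illustrative; _WaitForChildren is a hypothetical function):
# the decorator passes a dict of installed handlers to the wrapped function
# via the "signal_handlers" keyword argument:
#
#   @SignalHandled([signal.SIGCHLD])
#   def _WaitForChildren(signal_handlers=None):
#     handler = signal_handlers[signal.SIGCHLD]
#     if handler.called:
#       handler.Clear()
#       # ... reap child processes ...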


class SignalWakeupFd(object):
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
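
# Usage sketch (illustrative): combine with select() so that a signal arriving
# while waiting for I/O interrupts the wait via the pipe (see L{SignalHandler}
# below for the "wakeup" parameter):
#
#   wakeup = SignalWakeupFd()
#   handler = SignalHandler([signal.SIGTERM], wakeup=wakeup)
#   try:
#     (readable, _, _) = select.select([wakeup], [], [], 60.0)
#     if wakeup in readable:
#       wakeup.read(1)   # drain the notification byte
#   finally:
#     handler.Reset()
#     wakeup.Reset()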


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when the object is deleted or
  when L{Reset} is called. You can either pass your own handler function in or
  query the L{called} attribute to detect whether the signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @type wakeup: L{SignalWakeupFd} or None
    @param wakeup: If not None, notified via L{SignalWakeupFd.Notify} whenever
                   one of the signals arrives

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
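
# Usage sketch (illustrative; _DoSomeWork is a hypothetical function): poll a
# long-running loop for termination requests without a custom handler:
#
#   handler = SignalHandler([signal.SIGINT, signal.SIGTERM])
#   try:
#     while not handler.called:
#       _DoSomeWork()
#   finally:
#     handler.Reset()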


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
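
# Usage sketch (illustrative field names): accept a few static names plus a
# regex for indexed fields, then validate a user-supplied field list:
#
#   fields = FieldSet("name", "uuid", "disk\.size/([0-9]+)")
#   m = fields.Matches("disk.size/2")   # m.group(1) == "2"
#   fields.NonMatching(["name", "bogus"])   # returns ["bogus"]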