#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import datetime
import calendar
import collections
import struct
import IN

from cStringIO import StringIO

try:
  from hashlib import sha1
except ImportError:
  import sha
  sha1 = sha.new

try:
  import ctypes
except ImportError:
  ctypes = None

from ganeti import errors
from ganeti import constants


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
#
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
# pid_t to "int".
#
# IEEE Std 1003.1-2008:
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
_STRUCT_UCRED = "iII"
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2


class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]


  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")


def RunCmd(cmd, env=None, output=None, cwd='/', reset_env=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if isinstance(cmd, list):
    cmd = [str(val) for val in cmd]
    strcmd = " ".join(cmd)
    shell = False
  else:
    strcmd = cmd
    shell = True
  logging.debug("RunCmd '%s'", strcmd)

  if not reset_env:
    cmd_env = os.environ.copy()
    cmd_env["LC_ALL"] = "C"
  else:
    cmd_env = {}

  if env is not None:
    cmd_env.update(env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)
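
# Illustrative usage sketch (not part of the original module): RunCmd returns
# a RunResult whether or not the command succeeded, so callers check the
# "failed" attribute instead of catching an exception, e.g.:
#
#   result = RunCmd(["/bin/ls", "/tmp"])
#   if result.failed:
#     logging.error("ls failed (%s): %s", result.fail_reason, result.output)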


def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,
                           cwd=cwd)

  child.stdin.close()
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
  out = StringIO()
  err = StringIO()
  fdmap = {
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    }
  for fd in fdmap:
    status = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, status | os.O_NONBLOCK)

  while fdmap:
    try:
      pollresult = poller.poll()
    except EnvironmentError, eerr:
      if eerr.errno == errno.EINTR:
        continue
      raise
    except select.error, serr:
      if serr[0] == errno.EINTR:
        continue
      raise

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        if not data:
          poller.unregister(fd)
          del fdmap[fd]
          continue
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)
        del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status


def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: int
  @return: the exit status

  """
  fh = open(output, "a")
  try:
    child = subprocess.Popen(cmd, shell=via_shell,
                             stderr=subprocess.STDOUT,
                             stdout=fh,
                             stdin=subprocess.PIPE,
                             close_fds=True, env=env,
                             cwd=cwd)

    child.stdin.close()
    status = child.wait()
  finally:
    fh.close()
  return status


def RunParts(dir_name, env=None, reset_env=False):
  """Run scripts or programs in a directory.

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr
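
# Illustrative usage sketch (the hook directory below is an assumption, not
# part of the module): RunParts reports one (name, status, result) tuple per
# directory entry, e.g.:
#
#   for (name, status, result) in RunParts("/etc/ganeti/hooks"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("script %s failed: %s", name, result)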


def GetSocketCredentials(sock):
  """Returns the credentials of the foreign process connected to a socket.

  @param sock: Unix socket
  @rtype: tuple; (number, number, number)
  @return: The PID, UID and GID of the connected foreign process.

  """
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
                             _STRUCT_UCRED_SIZE)
  return struct.unpack(_STRUCT_UCRED, peercred)
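
# Illustrative sketch (not part of the original module): on a connected Unix
# domain socket the returned tuple unpacks directly into pid/uid/gid, e.g.:
#
#   pid, uid, gid = GetSocketCredentials(conn)  # conn: accepted AF_UNIX socket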


def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise


def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError, err:
    if err.errno != errno.ENOENT:
      raise


def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise


def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise


def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")


def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)

  fp = sha1()
  while True:
    data = f.read(4096)
    if not data:
      break

    fp.update(data)

  return fp.hexdigest()


def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    if cksum:
      ret[filename] = cksum

  return ret


def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype == constants.VTYPE_STRING:
      if not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
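
# Illustrative sketch (not part of the original module): ForceDictType converts
# values in place, so a dict such as {"size": "512M", "auto": "true"} checked
# against {"size": constants.VTYPE_SIZE, "auto": constants.VTYPE_BOOL} would
# typically end up as {"size": 512, "auto": True} (assuming constants.VALUE_TRUE
# is the string "true"), or raise TypeEnforcementError on bad input.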


def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  proc_entry = "/proc/%d/status" % pid
  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5, args=[proc_entry])
  except RetryTimeout, err:
    err.RaiseInner()


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None
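
# Illustrative sketch (not part of the original module) of the matching rules
# described in the docstring above:
#
#   MatchNameComponent("test1", ["test1.example.com", "test2.example.com"])
#     -> "test1.example.com"   (unique match on a label boundary)
#   MatchNameComponent("test1", ["test1.example.com", "test1.example.org"])
#     -> None                  (ambiguous, treated as no match)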


class HostInfo:
  """Class implementing resolver and hostname functionality

  """
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")

  def __init__(self, name=None):
    """Initialize the host name object.

    If the name argument is not passed, it will use this system's
    name.

    """
    if name is None:
      name = self.SysName()

    self.query = name
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
    self.ip = self.ipaddrs[0]

  def ShortName(self):
    """Returns the hostname without domain.

    """
    return self.name.split('.')[0]

  @staticmethod
  def SysName():
    """Return the current system's name.

    This is simply a wrapper over C{socket.gethostname()}.

    """
    return socket.gethostname()

  @staticmethod
  def LookupHostname(hostname):
    """Look up hostname

    @type hostname: str
    @param hostname: hostname to look up

    @rtype: tuple
    @return: a tuple (name, aliases, ipaddrs) as returned by
        C{socket.gethostbyname_ex}
    @raise errors.ResolverError: in case of errors in resolving

    """
    try:
      result = socket.gethostbyname_ex(hostname)
    except socket.gaierror, err:
      # hostname not found in DNS
      raise errors.ResolverError(hostname, err.args[0], err.args[1])

    return result

  @classmethod
  def NormalizeName(cls, hostname):
    """Validate and normalize the given hostname.

    @attention: the validation is a bit more relaxed than the standards
        require; most importantly, we allow underscores in names
    @raise errors.OpPrereqError: when the name is not valid

    """
    hostname = hostname.lower()
    if (not cls._VALID_NAME_RE.match(hostname) or
        # double-dots, meaning empty label
        ".." in hostname or
        # empty initial label
        hostname.startswith(".")):
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
                                 errors.ECODE_INVAL)
    if hostname.endswith("."):
      hostname = hostname.rstrip(".")
    return hostname


def GetHostInfo(name=None):
  """Lookup host name and raise an OpPrereqError for failures"""

  try:
    return HostInfo(name)
  except errors.ResolverError, err:
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
                               (err[0], err[2]), errors.ECODE_RESOLVER)


def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)


def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")
  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    rval = int(val)
    return rval

  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
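
# Illustrative sketch (not part of the original module): NiceSort orders names
# by their numeric components instead of purely lexicographically, e.g.:
#
#   NiceSort(["node10", "node1", "node2"]) -> ["node1", "node2", "node10"]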


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv


def IsValidIP(ip):
  """Verifies the syntax of an IPv4 address.

  This function checks if the IPv4 address passed is valid or not based
  on syntax (not IP range, class calculations, etc.).

  @type ip: str
  @param ip: the address to be checked
  @rtype: a regular expression match object
  @return: a regular expression match object, or None if the
      address is not valid

  """
  unit = "(0|[1-9]\d{0,2})"
  #TODO: convert and return only boolean
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)


def IsValidShellParam(word):
  """Verifies if the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
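
# Illustrative sketch (not part of the original module): ParseUnit and
# FormatUnit are inverses only up to the rounding described above, e.g.:
#
#   ParseUnit("1.5g")     -> 1536    (MiB, rounded up to a multiple of 4)
#   FormatUnit(1536, 'h') -> "1.5G"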


def AddAuthorizedKey(file_name, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  f = open(file_name, 'a+')
  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if fields and not fields[0].startswith('#') and ip == fields[0]:
            continue
          out.write(line)

        out.write("%s\t%s" % (ip, hostname))
        if aliases:
          out.write(" %s" % ' '.join(aliases))
        out.write('\n')

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if len(fields) > 1 and not fields[0].startswith('#'):
            names = fields[1:]
            if hostname in names:
              while hostname in names:
                names.remove(hostname)
              if names:
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
              continue

          out.write(line)

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")


def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    return value
  else:
    return "'%s'" % value.replace("'", "'\\''")


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join([ShellQuote(i) for i in args])


def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP or hostname to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored

  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error, (errcode, _):
      if errcode == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    success = False
  except socket.error, (errcode, _):
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)

  return success
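
# Illustrative sketch (the address below is a documentation IP, not part of the
# module): a reachability check that also requires the remote port to be open
# could look like:
#
#   if not TcpPing("192.0.2.10", constants.DEFAULT_NODED_PORT,
#                  timeout=5, live_port_needed=True):
#     logging.warning("node daemon port not reachable")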


def OwnIpAddress(address):
  """Check if the current host has the given IP address.

  Currently this is done by TCP-pinging the address from the loopback
  address.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
                 source=constants.LOCALHOST_IP_ADDRESS)


def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  files.sort()
  return files


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  return os.urandom(numbytes).encode('hex')


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    try:
      os.chmod(dir_name, dir_mode)
    except EnvironmentError, err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    return f.read(size)
  finally:
    f.close()


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
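
# Illustrative usage sketch (the path below is an assumption, not part of the
# module): WriteFile accepts either a content string or a writer callback:
#
#   WriteFile("/tmp/example.txt", data="hello\n", mode=0644)
#   WriteFile("/tmp/example.txt", fn=lambda fd: os.write(fd, "hello\n"))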


def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  """
  file_lines = ReadFile(file_name).splitlines()
  full_lines = filter(bool, file_lines)
  if not file_lines or not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]


def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used
      return idx + base
  return None
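
# Illustrative sketch (not part of the original module), matching the docstring
# examples: FirstFree([0, 1, 3]) -> 2, FirstFree([3, 4, 6], base=3) -> 5, and
# FirstFree([0, 1, 2]) -> None (no hole in the sequence).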
1577

    
1578

    
1579
def SingleWaitForFdCondition(fdobj, event, timeout):
1580
  """Waits for a condition to occur on the socket.
1581

1582
  Immediately returns at the first interruption.
1583

1584
  @type fdobj: integer or object supporting a fileno() method
1585
  @param fdobj: entity to wait for events on
1586
  @type event: integer
1587
  @param event: ORed condition (see select module)
1588
  @type timeout: float or None
1589
  @param timeout: Timeout in seconds
1590
  @rtype: int or None
1591
  @return: None for timeout, otherwise occured conditions
1592

1593
  """
1594
  check = (event | select.POLLPRI |
1595
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
1596

    
1597
  if timeout is not None:
1598
    # Poller object expects milliseconds
1599
    timeout *= 1000
1600

    
1601
  poller = select.poll()
1602
  poller.register(fdobj, event)
1603
  try:
1604
    # TODO: If the main thread receives a signal and we have no timeout, we
1605
    # could wait forever. This should check a global "quit" flag or something
1606
    # every so often.
1607
    io_events = poller.poll(timeout)
1608
  except select.error, err:
1609
    if err[0] != errno.EINTR:
1610
      raise
1611
    io_events = []
1612
  if io_events and io_events[0][1] & check:
1613
    return io_events[0][1]
1614
  else:
1615
    return None
1616

    
1617

    
1618
class FdConditionWaiterHelper(object):
1619
  """Retry helper for WaitForFdCondition.
1620

1621
  This class contains the retried and wait functions that make sure
1622
  WaitForFdCondition can continue waiting until the timeout is actually
1623
  expired.
1624

1625
  """
1626

    
1627
  def __init__(self, timeout):
1628
    self.timeout = timeout
1629

    
1630
  def Poll(self, fdobj, event):
1631
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1632
    if result is None:
1633
      raise RetryAgain()
1634
    else:
1635
      return result
1636

    
1637
  def UpdateTimeout(self, timeout):
1638
    self.timeout = timeout
1639

    
1640

    
1641
def WaitForFdCondition(fdobj, event, timeout):
1642
  """Waits for a condition to occur on the socket.
1643

1644
  Retries until the timeout is expired, even if interrupted.
1645

1646
  @type fdobj: integer or object supporting a fileno() method
1647
  @param fdobj: entity to wait for events on
1648
  @type event: integer
1649
  @param event: ORed condition (see select module)
1650
  @type timeout: float or None
1651
  @param timeout: Timeout in seconds
1652
  @rtype: int or None
1653
  @return: None for timeout, otherwise occured conditions
1654

1655
  """
1656
  if timeout is not None:
1657
    retrywaiter = FdConditionWaiterHelper(timeout)
1658
    try:
1659
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1660
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1661
    except RetryTimeout:
1662
      result = None
1663
  else:
1664
    result = None
1665
    while result is None:
1666
      result = SingleWaitForFdCondition(fdobj, event, timeout)
1667
  return result
1668

    
1669

    
1670
def UniqueSequence(seq):
1671
  """Returns a list with unique elements.
1672

1673
  Element order is preserved.
1674

1675
  @type seq: sequence
1676
  @param seq: the sequence with the source elements
1677
  @rtype: list
1678
  @return: list of unique elements from seq
1679

1680
  """
1681
  seen = set()
1682
  return [i for i in seq if i not in seen and not seen.add(i)]
1683

    
1684

    
1685
def NormalizeAndValidateMac(mac):
1686
  """Normalizes and check if a MAC address is valid.
1687

1688
  Checks whether the supplied MAC address is formally correct, only
1689
  accepts colon separated format. Normalize it to all lower.
1690

1691
  @type mac: str
1692
  @param mac: the MAC to be validated
1693
  @rtype: str
1694
  @return: returns the normalized and validated MAC.
1695

1696
  @raise errors.OpPrereqError: If the MAC isn't valid
1697

1698
  """
1699
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
1700
  if not mac_check.match(mac):
1701
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
1702
                               mac, errors.ECODE_INVAL)
1703

    
1704
  return mac.lower()
1705

    
1706

    
1707
def TestDelay(duration):
1708
  """Sleep for a fixed amount of time.
1709

1710
  @type duration: float
1711
  @param duration: the sleep duration
1712
  @rtype: boolean
1713
  @return: False for negative value, True otherwise
1714

1715
  """
1716
  if duration < 0:
1717
    return False, "Invalid sleep duration"
1718
  time.sleep(duration)
1719
  return True, None
1720

    
1721

    
1722
def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get an
      error other than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    if err.errno != errno.EBADF:
      if retries > 0:
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on


def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)


def Mlockall():
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires the ctypes module.

  """
  if ctypes is None:
    logging.warning("Cannot set memory lock, ctypes module not found")
    return

  libc = ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older versions of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")


def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit
  UMASK = 077
  WORKDIR = "/"

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    os.setsid()
    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      os.chdir(WORKDIR)
      os.umask(UMASK)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    os._exit(0) # Exit parent of the first child.

  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0


def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pid = os.getpid()
  pidfilename = DaemonPidFileName(name)
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % pid)


def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    pass


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)


def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem objects (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: list of str
  @param search_path: locations to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)


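# Illustrative usage sketch, not part of the original module: SplitTime and
# MergeTime are inverses of each other up to microsecond resolution.
def _ExampleSplitMergeTime():
  """Usage sketch for L{SplitTime}/L{MergeTime} (illustrative only).

  """
  assert SplitTime(1.5) == (1, 500000)
  # Rounding hides the tiny float error introduced by the conversion
  assert round(MergeTime((1, 500000)), 6) == 1.5

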
def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port


class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fall back to stderr.

  When an error occurs while writing to the logfile, logging.FileHandler
  tries to log on stderr. This doesn't work in ganeti since stderr is
  redirected to the logfile. This class avoids such failures by reporting
  errors to /dev/console instead.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with the FileHandler method; if that fails, write
    to /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously set up handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not set up or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise


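# Illustrative usage sketch, not part of the original module: a daemon would
# typically configure logging right after parsing its options. The logfile
# path used here is purely hypothetical.
def _ExampleSetupLogging():
  """Usage sketch for L{SetupLogging} (illustrative only).

  """
  # Log to a file and to stderr, without debug details; syslog usage is
  # left at the cluster-wide default
  SetupLogging("/tmp/example-daemon.log", debug=0, stderr_logging=True,
               program="example-daemon")
  logging.info("logging is now configured")

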
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This avoids accepting paths like /dir/../../other/path as valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result


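# Illustrative usage sketch, not part of the original module: PathJoin behaves
# like os.path.join but refuses anything escaping the first component.
def _ExamplePathJoin():
  """Usage sketch for L{PathJoin} (illustrative only).

  """
  assert PathJoin("/var", "log", "ganeti") == "/var/log/ganeti"
  # Backtracking components are rejected
  try:
    PathJoin("/var", "../etc/passwd")
  except ValueError:
    pass

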
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


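# Illustrative usage sketch, not part of the original module: the two accepted
# timestamp forms are UTC ("Z" suffix) and an explicit +/-HHMM offset.
def _ExampleParseAsn1Generalizedtime():
  """Usage sketch for L{_ParseAsn1Generalizedtime} (illustrative only).

  """
  # 1970-01-01 00:00:00 UTC is the Unix epoch
  assert _ParseAsn1Generalizedtime("19700101000000Z") == 0
  # The same instant, expressed with a +0100 offset
  assert _ParseAsn1Generalizedtime("19700101010000+0100") == 0

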
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr code in the Python sources;
  we don't use string_escape anymore since that escapes single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu


def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be part of one of the
  elements. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist


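# Illustrative usage sketch, not part of the original module: a backslash
# before the separator keeps it inside the element, while a double backslash
# is just a literal backslash.
def _ExampleUnescapeAndSplit():
  """Usage sketch for L{UnescapeAndSplit} (illustrative only).

  """
  assert UnescapeAndSplit("a,b,c") == ["a", "b", "c"]
  # An escaped comma stays inside the element
  assert UnescapeAndSplit("a\\,b,c") == ["a,b", "c"]
  # An escaped backslash before a comma still splits
  assert UnescapeAndSplit("a\\\\,b,c") == ["a\\", "b", "c"]

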
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)


def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  """
  try:
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
      return None
    else:
      raise
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
      return None
    else:
      raise


def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper


def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value


class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which were passed by the retried function to RetryAgain will
  be preserved in RetryTimeout, if it is raised. If such an argument was an
  exception, the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)


class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs,
  as arguments to RetryTimeout. If an exception is passed, the RaiseInner()
  method of the RetryTimeout exception can be used to reraise it.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run
    if self._limit is None or self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()


def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)


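# Illustrative usage sketch, not part of the original module: the retried
# function signals "not ready yet" by raising RetryAgain, and Retry re-invokes
# it with an increasing delay until the timeout expires.
def _ExampleRetry(results):
  """Usage sketch for L{Retry} (illustrative only).

  @param results: list of values, where None entries mean "not ready yet"

  """
  def _poll():
    if not results:
      raise RetryAgain()
    value = results.pop(0)
    if value is None:
      raise RetryAgain()
    return value

  try:
    # Start with a 0.1s delay, multiply by 1.5 up to a 1.0s ceiling
    return Retry(_poll, (0.1, 1.5, 1.0), 5.0)
  except RetryTimeout:
    return None

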
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)


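# Illustrative usage sketch, not part of the original module: the lock file
# path below is purely hypothetical.
def _ExampleFileLock():
  """Usage sketch for L{FileLock} (illustrative only).

  """
  lock = FileLock.Open("/tmp/example.lock")
  try:
    # Wait up to ten seconds for the exclusive lock
    lock.Exclusive(blocking=True, timeout=10)
    # ... do work while holding the lock ...
    lock.Unlock()
  finally:
    lock.Close()

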
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)


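# Illustrative usage sketch, not part of the original module: LineSplitter
# turns arbitrarily-chunked writes into one callback per complete line.
def _ExampleLineSplitter():
  """Usage sketch for L{LineSplitter} (illustrative only).

  """
  lines = []
  splitter = LineSplitter(lines.append)
  # Data may arrive in chunks that don't align with line boundaries
  splitter.write("first li")
  splitter.write("ne\nsecond line\nthird")
  splitter.close()
  assert lines == ["first line", "second line", "third"]

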
def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when destructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers

    """
    self.signum = set(signum)
    self.called = False

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  # we don't care about arguments, but we leave them named for the future
  def _HandleSignal(self, signum, frame): # pylint: disable-msg=W0613
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True


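# Illustrative usage sketch, not part of the original module: a main loop can
# poll the "called" flag to notice a SIGTERM and shut down cleanly.
def _ExampleSignalHandler(work_fn):
  """Usage sketch for L{SignalHandler} (illustrative only).

  @param work_fn: callable doing one unit of work per invocation

  """
  handler = SignalHandler([signal.SIGTERM])
  try:
    while not handler.called:
      work_fn()
  finally:
    # Restore the previous SIGTERM handler
    handler.Reset()

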
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
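

# Illustrative usage sketch, not part of the original module: FieldSet entries
# are anchored regular expressions, so both literal names and parameterized
# fields can be matched.
def _ExampleFieldSet():
  """Usage sketch for L{FieldSet} (illustrative only).

  """
  fields = FieldSet("name", "size", r"disk\.size/([0-9]+)")
  assert fields.Matches("name")
  # The match object gives access to the regex groups
  assert fields.Matches("disk.size/2").group(1) == "2"
  assert fields.NonMatching(["name", "foo"]) == ["foo"]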