#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import datetime
import calendar
import collections
import struct
import IN

from cStringIO import StringIO

try:
  from hashlib import sha1
except ImportError:
  import sha
  sha1 = sha.new

from ganeti import errors
from ganeti import constants


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
#
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
# pid_t to "int".
#
# IEEE Std 1003.1-2008:
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
_STRUCT_UCRED = "iII"
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)


class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]


  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")


def RunCmd(cmd, env=None, output=None, cwd='/', reset_env=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if isinstance(cmd, list):
    cmd = [str(val) for val in cmd]
    strcmd = " ".join(cmd)
    shell = False
  else:
    strcmd = cmd
    shell = True
  logging.debug("RunCmd '%s'", strcmd)

  if not reset_env:
    cmd_env = os.environ.copy()
    cmd_env["LC_ALL"] = "C"
  else:
    cmd_env = {}

  if env is not None:
    cmd_env.update(env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)
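# Illustrative usage of RunCmd (not part of the original source); assumes a
# POSIX system where /bin/true exists:
#
#   result = RunCmd(["/bin/true"], env={"FOO": "bar"})
#   if result.failed:
#     logging.error("command failed: %s; output: %s",
#                   result.fail_reason, result.output)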


def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,
                           cwd=cwd)

  child.stdin.close()
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
  out = StringIO()
  err = StringIO()
  fdmap = {
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    }
  for fd in fdmap:
    status = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, status | os.O_NONBLOCK)

  while fdmap:
    try:
      pollresult = poller.poll()
    except EnvironmentError, eerr:
      if eerr.errno == errno.EINTR:
        continue
      raise
    except select.error, serr:
      if serr[0] == errno.EINTR:
        continue
      raise

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        if not data:
          poller.unregister(fd)
          del fdmap[fd]
          continue
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)
        del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status


def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: int
  @return: the exit status

  """
  fh = open(output, "a")
  try:
    child = subprocess.Popen(cmd, shell=via_shell,
                             stderr=subprocess.STDOUT,
                             stdout=fh,
                             stdin=subprocess.PIPE,
                             close_fds=True, env=env,
                             cwd=cwd)

    child.stdin.close()
    status = child.wait()
  finally:
    fh.close()
  return status


def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr
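# Illustrative usage of RunParts (not part of the original source); the
# directory path is hypothetical:
#
#   for (name, status, runresult) in RunParts("/etc/ganeti/hooks.d"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("hook %s could not be run: %s", name, runresult)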


def GetSocketCredentials(sock):
  """Returns the credentials of the foreign process connected to a socket.

  @param sock: Unix socket
  @rtype: tuple; (number, number, number)
  @return: The PID, UID and GID of the connected foreign process.

  """
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
                             _STRUCT_UCRED_SIZE)
  return struct.unpack(_STRUCT_UCRED, peercred)


def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise


def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise


def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise


def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")


def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)

  fp = sha1()
  while True:
    data = f.read(4096)
    if not data:
      break

    fp.update(data)

  return fp.hexdigest()


def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    if cksum:
      ret[filename] = cksum

  return ret


def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype == constants.VTYPE_STRING:
      if not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
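# Illustrative usage of ForceDictType (not part of the original source); the
# parameter names are hypothetical:
#
#   params = {"memory": "512M", "auto_balance": "true"}
#   ForceDictType(params, {"memory": constants.VTYPE_SIZE,
#                          "auto_balance": constants.VTYPE_BOOL})
#   # params is now {"memory": 512, "auto_balance": True}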


def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  proc_entry = "/proc/%d/status" % pid
  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5, args=[proc_entry])
  except RetryTimeout, err:
    err.RaiseInner()


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadFile(pidfile)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None
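# Illustrative behaviour of MatchNameComponent (not part of the original
# source):
#
#   MatchNameComponent("node1", ["node1.example.com", "node2.example.com"])
#   # -> "node1.example.com"
#   MatchNameComponent("node", ["node1.example.com", "node2.example.com"])
#   # -> None (neither entry matches "node" as a full label)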


class HostInfo:
  """Class implementing resolver and hostname functionality

  """
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")

  def __init__(self, name=None):
    """Initialize the host name object.

    If the name argument is not passed, it will use this system's
    name.

    """
    if name is None:
      name = self.SysName()

    self.query = name
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
    self.ip = self.ipaddrs[0]

  def ShortName(self):
    """Returns the hostname without domain.

    """
    return self.name.split('.')[0]

  @staticmethod
  def SysName():
    """Return the current system's name.

    This is simply a wrapper over C{socket.gethostname()}.

    """
    return socket.gethostname()

  @staticmethod
  def LookupHostname(hostname):
    """Look up hostname

    @type hostname: str
    @param hostname: hostname to look up

    @rtype: tuple
    @return: a tuple (name, aliases, ipaddrs) as returned by
        C{socket.gethostbyname_ex}
    @raise errors.ResolverError: in case of errors in resolving

    """
    try:
      result = socket.gethostbyname_ex(hostname)
    except socket.gaierror, err:
      # hostname not found in DNS
      raise errors.ResolverError(hostname, err.args[0], err.args[1])

    return result

  @classmethod
  def NormalizeName(cls, hostname):
    """Validate and normalize the given hostname.

    @attention: the validation is a bit more relaxed than the standards
        require; most importantly, we allow underscores in names
    @raise errors.OpPrereqError: when the name is not valid

    """
    hostname = hostname.lower()
    if (not cls._VALID_NAME_RE.match(hostname) or
        # double-dots, meaning empty label
        ".." in hostname or
        # empty initial label
        hostname.startswith(".")):
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
                                 errors.ECODE_INVAL)
    if hostname.endswith("."):
      hostname = hostname.rstrip(".")
    return hostname


def GetHostInfo(name=None):
  """Lookup host name and raise an OpPrereqError for failures"""

  try:
    return HostInfo(name)
  except errors.ResolverError, err:
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
                               (err[0], err[2]), errors.ECODE_RESOLVER)


def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)


def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")
  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    rval = int(val)
    return rval

  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
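# Illustrative behaviour of NiceSort (not part of the original source):
#
#   NiceSort(["a1", "a10", "a11", "a2"])   # -> ["a1", "a2", "a10", "a11"]
#   sorted(["a1", "a10", "a11", "a2"])     # -> ["a1", "a10", "a11", "a2"]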


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv


def IsValidIP(ip):
  """Verifies the syntax of an IPv4 address.

  This function checks if the IPv4 address passed is valid or not based
  on syntax (not IP range, class calculations, etc.).

  @type ip: str
  @param ip: the address to be checked
  @rtype: a regular expression match object
  @return: a regular expression match object, or None if the
      address is not valid

  """
  unit = "(0|[1-9]\d{0,2})"
  #TODO: convert and return only boolean
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)


def IsValidShellParam(word):
  """Verifies whether the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
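# Illustrative behaviour of FormatUnit/ParseUnit (not part of the original
# source); note that ParseUnit rounds up to a multiple of 4 MiB:
#
#   FormatUnit(1024, 'h')   # -> "1.0G"
#   FormatUnit(512, 'm')    # -> "512"
#   ParseUnit("1.5g")       # -> 1536
#   ParseUnit("3")          # -> 4 (rounded up to the next multiple of 4)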


def AddAuthorizedKey(file_name, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  f = open(file_name, 'a+')
  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if fields and not fields[0].startswith('#') and ip == fields[0]:
            continue
          out.write(line)

        out.write("%s\t%s" % (ip, hostname))
        if aliases:
          out.write(" %s" % ' '.join(aliases))
        out.write('\n')

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if len(fields) > 1 and not fields[0].startswith('#'):
            names = fields[1:]
            if hostname in names:
              while hostname in names:
                names.remove(hostname)
              if names:
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
              continue

          out.write(line)

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat them
  as separators.

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")


def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    return value
  else:
    return "'%s'" % value.replace("'", "'\\''")


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join([ShellQuote(i) for i in args])
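# Illustrative behaviour of ShellQuote/ShellQuoteArgs (not part of the
# original source):
#
#   ShellQuote("safe-name")                 # -> "safe-name" (left unquoted)
#   ShellQuoteArgs(["ls", "-l", "my dir"])  # -> "ls -l 'my dir'"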


def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP or hostname to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored

  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error, (errcode, _):
      if errcode == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    success = False
  except socket.error, (errcode, _):
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)

  return success
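# Illustrative usage of TcpPing (not part of the original source); the
# address and port below are hypothetical:
#
#   if TcpPing("192.0.2.10", 1811, timeout=5, live_port_needed=True):
#     logging.debug("remote port is reachable")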


def OwnIpAddress(address):
  """Check if the current host has the given IP address.

  Currently this is done by TCP-pinging the address from the loopback
  address.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
                 source=constants.LOCALHOST_IP_ADDRESS)


def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  files.sort()
  return files


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  return os.urandom(numbytes).encode('hex')
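# Illustrative usage of NewUUID/GenerateSecret (not part of the original
# source):
#
#   token = GenerateSecret()   # 40 hex characters (20 random bytes)
#   uuid = NewUUID()           # read from /proc, so Linux only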


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1, oneline=False):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @type oneline: bool
  @param oneline: Whether to read only one line (newline char is not included)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    if oneline:
      data = f.readline(size).rstrip("\r\n")
    else:
      data = f.read(size)
  finally:
    f.close()

  return data


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
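# Illustrative usage of WriteFile (not part of the original source); the
# target path is hypothetical:
#
#   WriteFile("/var/lib/ganeti/example.conf", data="key = value\n",
#             mode=0644, backup=True)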


def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used
      return idx + base
  return None
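# Illustrative behaviour of FirstFree (not part of the original source):
#
#   FirstFree([0, 1, 3])          # -> 2
#   FirstFree([3, 4, 6], base=3)  # -> 5
#   FirstFree([0, 1, 2])          # -> None (no gap in the sequence)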


def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None


class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout


def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result


def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  return [i for i in seq if i not in seen and not seen.add(i)]
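# Illustrative behaviour of UniqueSequence (not part of the original source):
#
#   UniqueSequence([1, 2, 2, 3, 1])   # -> [1, 2, 3]
#   UniqueSequence(["a", "b", "a"])   # -> ["a", "b"]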


def NormalizeAndValidateMac(mac):
  """Normalizes and checks whether a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalizes it to all-lowercase.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
  if not mac_check.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  return mac.lower()
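# Illustrative behaviour of NormalizeAndValidateMac (not part of the
# original source):
#
#   NormalizeAndValidateMac("AA:00:56:C3:0F:12")  # -> "aa:00:56:c3:0f:12"
#   NormalizeAndValidateMac("aa-00-56-c3-0f-12")  # raises errors.OpPrereqError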
def TestDelay(duration):
1665
  """Sleep for a fixed amount of time.
1666

1667
  @type duration: float
1668
  @param duration: the sleep duration
1669
  @rtype: boolean
1670
  @return: False for negative value, True otherwise
1671

1672
  """
1673
  if duration < 0:
1674
    return False, "Invalid sleep duration"
1675
  time.sleep(duration)
1676
  return True, None
1677

    
1678

    
1679
def _CloseFDNoErr(fd, retries=5):
1680
  """Close a file descriptor ignoring errors.
1681

1682
  @type fd: int
1683
  @param fd: the file descriptor
1684
  @type retries: int
1685
  @param retries: how many retries to make, in case we get any
1686
      other error than EBADF
1687

1688
  """
1689
  try:
1690
    os.close(fd)
1691
  except OSError, err:
1692
    if err.errno != errno.EBADF:
1693
      if retries > 0:
1694
        _CloseFDNoErr(fd, retries - 1)
1695
    # else either it's closed already or we're out of retries, so we
1696
    # ignore this and go on
1697

    
1698

    
1699
def CloseFDs(noclose_fds=None):
1700
  """Close file descriptors.
1701

1702
  This closes all file descriptors above 2 (i.e. except
1703
  stdin/out/err).
1704

1705
  @type noclose_fds: list or None
1706
  @param noclose_fds: if given, it denotes a list of file descriptor
1707
      that should not be closed
1708

1709
  """
1710
  # Default maximum for the number of available file descriptors.
1711
  if 'SC_OPEN_MAX' in os.sysconf_names:
1712
    try:
1713
      MAXFD = os.sysconf('SC_OPEN_MAX')
1714
      if MAXFD < 0:
1715
        MAXFD = 1024
1716
    except OSError:
1717
      MAXFD = 1024
1718
  else:
1719
    MAXFD = 1024
1720
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
1721
  if (maxfd == resource.RLIM_INFINITY):
1722
    maxfd = MAXFD
1723

    
1724
  # Iterate through and close all file descriptors (except the standard ones)
1725
  for fd in range(3, maxfd):
1726
    if noclose_fds and fd in noclose_fds:
1727
      continue
1728
    _CloseFDNoErr(fd)


def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit
  UMASK = 077
  WORKDIR = "/"

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    os.setsid()
    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      os.chdir(WORKDIR)
      os.umask(UMASK)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    os._exit(0) # Exit parent of the first child.

  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0


def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pid = os.getpid()
  pidfilename = DaemonPidFileName(name)
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % pid)


def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    pass


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)


def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem objects (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: list
  @param search_path: the directories in which to search
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
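
# Illustrative usage (a minimal sketch with made-up volume group data):
#
#   >>> CheckVolumeGroupSize({"xenvg": 10240}, "xenvg", 20480)
#   "volume group 'xenvg' too small (20480 MiB required, 10240 MiB found)"
#   >>> CheckVolumeGroupSize({"xenvg": 10240}, "xenvg", 4096) is None
#   True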


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)
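
# Illustrative round trip between SplitTime and MergeTime (a minimal
# sketch using an exactly representable value):
#
#   >>> SplitTime(1.5)
#   (1, 500000)
#   >>> MergeTime((1, 500000))
#   1.5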


def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise


def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This keeps paths like /dir/../../other/path from being considered valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
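
# Illustrative usage (a minimal sketch; the paths below are made up):
#
#   >>> PathJoin("/tmp", "subdir", "file.txt")
#   '/tmp/subdir/file.txt'
#
# whereas PathJoin("/tmp", "../etc") raises ValueError, since the joined
# path does not normalize back under /tmp.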


def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())
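
# Illustrative usage (a minimal sketch; the timestamps below are made up
# and the results are Unix timestamps in UTC):
#
#   >>> _ParseAsn1Generalizedtime("20100101000000Z")
#   1262304000
#   >>> _ParseAsn1Generalizedtime("20100101000000+0130")
#   1262298600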


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
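
# Illustrative usage (a minimal sketch with a made-up message; control
# characters are replaced by their escaped representation):
#
#   >>> SafeEncode("disk error:\n\tsector 42\x07")
#   'disk error:\\n\\tsector 42\\x07'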


def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped so that it becomes part of an
  element. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
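
# Illustrative usage (a minimal sketch; note the escaped comma that ends
# up inside the second element):
#
#   >>> UnescapeAndSplit("a,b\\,c,d")
#   ['a', 'b,c', 'd']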


def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
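
# Illustrative usage (a minimal sketch; any callable returning a boolean
# works, here os.path.isdir on the root directory):
#
#   >>> RunInSeparateProcess(os.path.isdir, "/")
#   True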


def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper


def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed
  # on all platforms
  return time.strftime("%F %T", time.localtime(val))


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value


class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such an argument is an
  exception, the RaiseInner helper method will re-raise it.

  """
  def RaiseInner(self):
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)


class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout
  occurs, as arguments to RetryTimeout. If an exception is passed, the
  RaiseInner() method of the RetryTimeout exception can be used to
  re-raise it.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run: without a limit just keep growing, otherwise
    # grow but never past the limit
    if self._limit is None:
      self._next = self._next * self._factor
    elif self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current
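
# Illustrative delay sequence (a minimal sketch with made-up parameters:
# start 0.5s, factor 2.0, upper limit 4.0s):
#
#   >>> calc = _RetryDelayCalculator(0.5, 2.0, 4.0)
#   >>> [calc() for _ in range(5)]
#   [0.5, 1.0, 2.0, 4.0, 4.0]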


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()


def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
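
# Illustrative usage (a minimal sketch; "/tmp/example-flag" is a made-up
# path): poll for a file with an increasing delay, giving up after 30s.
#
#   def _CheckFlag():
#     if not os.path.exists("/tmp/example-flag"):
#       raise RetryAgain()
#
#   Retry(_CheckFlag, (0.1, 1.5, 2.0), 30.0)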
2671

    
2672

    
2673
class FileLock(object):
2674
  """Utility class for file locks.
2675

2676
  """
2677
  def __init__(self, fd, filename):
2678
    """Constructor for FileLock.
2679

2680
    @type fd: file
2681
    @param fd: File object
2682
    @type filename: str
2683
    @param filename: Path of the file opened at I{fd}
2684

2685
    """
2686
    self.fd = fd
2687
    self.filename = filename
2688

    
2689
  @classmethod
2690
  def Open(cls, filename):
2691
    """Creates and opens a file to be used as a file-based lock.
2692

2693
    @type filename: string
2694
    @param filename: path to the file to be locked
2695

2696
    """
2697
    # Using "os.open" is necessary to allow both opening existing file
2698
    # read/write and creating if not existing. Vanilla "open" will truncate an
2699
    # existing file -or- allow creating if not existing.
2700
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
2701
               filename)
2702

    
2703
  def __del__(self):
2704
    self.Close()
2705

    
2706
  def Close(self):
2707
    """Close the file and release the lock.
2708

2709
    """
2710
    if hasattr(self, "fd") and self.fd:
2711
      self.fd.close()
2712
      self.fd = None
2713

    
2714
  def _flock(self, flag, blocking, timeout, errmsg):
2715
    """Wrapper for fcntl.flock.
2716

2717
    @type flag: int
2718
    @param flag: operation flag
2719
    @type blocking: bool
2720
    @param blocking: whether the operation should be done in blocking mode.
2721
    @type timeout: None or float
2722
    @param timeout: for how long the operation should be retried (implies
2723
                    non-blocking mode).
2724
    @type errmsg: string
2725
    @param errmsg: error message in case operation fails.
2726

2727
    """
2728
    assert self.fd, "Lock was closed"
2729
    assert timeout is None or timeout >= 0, \
2730
      "If specified, timeout must be positive"
2731
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
2732

    
2733
    # When a timeout is used, LOCK_NB must always be set
2734
    if not (timeout is None and blocking):
2735
      flag |= fcntl.LOCK_NB
2736

    
2737
    if timeout is None:
2738
      self._Lock(self.fd, flag, timeout)
2739
    else:
2740
      try:
2741
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
2742
              args=(self.fd, flag, timeout))
2743
      except RetryTimeout:
2744
        raise errors.LockError(errmsg)
2745

    
2746
  @staticmethod
2747
  def _Lock(fd, flag, timeout):
2748
    try:
2749
      fcntl.flock(fd, flag)
2750
    except IOError, err:
2751
      if timeout is not None and err.errno == errno.EAGAIN:
2752
        raise RetryAgain()
2753

    
2754
      logging.exception("fcntl.flock failed")
2755
      raise
2756

    
2757
  def Exclusive(self, blocking=False, timeout=None):
2758
    """Locks the file in exclusive mode.
2759

2760
    @type blocking: boolean
2761
    @param blocking: whether to block and wait until we
2762
        can lock the file or return immediately
2763
    @type timeout: int or None
2764
    @param timeout: if not None, the duration to wait for the lock
2765
        (in blocking mode)
2766

2767
    """
2768
    self._flock(fcntl.LOCK_EX, blocking, timeout,
2769
                "Failed to lock %s in exclusive mode" % self.filename)
2770

    
2771
  def Shared(self, blocking=False, timeout=None):
2772
    """Locks the file in shared mode.
2773

2774
    @type blocking: boolean
2775
    @param blocking: whether to block and wait until we
2776
        can lock the file or return immediately
2777
    @type timeout: int or None
2778
    @param timeout: if not None, the duration to wait for the lock
2779
        (in blocking mode)
2780

2781
    """
2782
    self._flock(fcntl.LOCK_SH, blocking, timeout,
2783
                "Failed to lock %s in shared mode" % self.filename)
2784

    
2785
  def Unlock(self, blocking=True, timeout=None):
2786
    """Unlocks the file.
2787

2788
    According to C{flock(2)}, unlocking can also be a nonblocking
2789
    operation::
2790

2791
      To make a non-blocking request, include LOCK_NB with any of the above
2792
      operations.
2793

2794
    @type blocking: boolean
2795
    @param blocking: whether to block and wait until we
2796
        can lock the file or return immediately
2797
    @type timeout: int or None
2798
    @param timeout: if not None, the duration to wait for the lock
2799
        (in blocking mode)
2800

2801
    """
2802
    self._flock(fcntl.LOCK_UN, blocking, timeout,
2803
                "Failed to unlock %s" % self.filename)
2804

    
2805

    
2806
class LineSplitter:
2807
  """Splits data chunks into lines separated by newline.
2808

2809
  Instances provide a file-like interface.
2810

2811
  """
2812
  def __init__(self, line_fn, *args):
2813
    """Initializes this class.
2814

2815
    @type line_fn: callable
2816
    @param line_fn: Function called for each line, first parameter is line
2817
    @param args: Extra arguments for L{line_fn}
2818

2819
    """
2820
    assert callable(line_fn)
2821

    
2822
    if args:
2823
      # Python 2.4 doesn't have functools.partial yet
2824
      self._line_fn = \
2825
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
2826
    else:
2827
      self._line_fn = line_fn
2828

    
2829
    self._lines = collections.deque()
2830
    self._buffer = ""
2831

    
2832
  def write(self, data):
2833
    parts = (self._buffer + data).split("\n")
2834
    self._buffer = parts.pop()
2835
    self._lines.extend(parts)
2836

    
2837
  def flush(self):
2838
    while self._lines:
2839
      self._line_fn(self._lines.popleft().rstrip("\r\n"))
2840

    
2841
  def close(self):
2842
    self.flush()
2843
    if self._buffer:
2844
      self._line_fn(self._buffer)
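
# Illustrative usage (a minimal sketch collecting lines into a list):
#
#   >>> collected = []
#   >>> splitter = LineSplitter(collected.append)
#   >>> splitter.write("first\nsec")
#   >>> splitter.write("ond\n")
#   >>> splitter.close()
#   >>> collected
#   ['first', 'second']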
2845

    
2846

    
2847
def SignalHandled(signums):
2848
  """Signal Handled decoration.
2849

2850
  This special decorator installs a signal handler and then calls the target
2851
  function. The function must accept a 'signal_handlers' keyword argument,
2852
  which will contain a dict indexed by signal number, with SignalHandler
2853
  objects as values.
2854

2855
  The decorator can be safely stacked with iself, to handle multiple signals
2856
  with different handlers.
2857

2858
  @type signums: list
2859
  @param signums: signals to intercept
2860

2861
  """
2862
  def wrap(fn):
2863
    def sig_function(*args, **kwargs):
2864
      assert 'signal_handlers' not in kwargs or \
2865
             kwargs['signal_handlers'] is None or \
2866
             isinstance(kwargs['signal_handlers'], dict), \
2867
             "Wrong signal_handlers parameter in original function call"
2868
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
2869
        signal_handlers = kwargs['signal_handlers']
2870
      else:
2871
        signal_handlers = {}
2872
        kwargs['signal_handlers'] = signal_handlers
2873
      sighandler = SignalHandler(signums)
2874
      try:
2875
        for sig in signums:
2876
          signal_handlers[sig] = sighandler
2877
        return fn(*args, **kwargs)
2878
      finally:
2879
        sighandler.Reset()
2880
    return sig_function
2881
  return wrap
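
# Illustrative usage (a minimal sketch; "_ExampleLoop" is a made-up
# function name):
#
#   @SignalHandled([signal.SIGTERM])
#   def _ExampleLoop(signal_handlers=None):
#     handler = signal_handlers[signal.SIGTERM]
#     while not handler.called:
#       pass  # do one unit of work per iteration, then re-check the flag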


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when the instance is
  deleted or when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers

    """
    self.signum = set(signum)
    self.called = False

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  # we don't care about arguments, but we leave them named for the future
  def _HandleSignal(self, signum, frame): # pylint: disable-msg=W0613
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
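
# Illustrative usage (a minimal sketch with made-up field names):
#
#   >>> fields = FieldSet("name", "snodes/([0-9]+)")
#   >>> fields.Matches("snodes/3").group(1)
#   '3'
#   >>> fields.NonMatching(["name", "size"])
#   ['size']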