#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import datetime
import calendar
import collections
import struct
import IN

from cStringIO import StringIO

try:
  from hashlib import sha1
except ImportError:
  import sha
  sha1 = sha.new

from ganeti import errors
from ganeti import constants


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
#
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
# pid_t to "int".
#
# IEEE Std 1003.1-2008:
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
_STRUCT_UCRED = "iII"
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)


class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]


  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")


def RunCmd(cmd, env=None, output=None, cwd='/', reset_env=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if isinstance(cmd, list):
    cmd = [str(val) for val in cmd]
    strcmd = " ".join(cmd)
    shell = False
  else:
    strcmd = cmd
    shell = True
  logging.debug("RunCmd '%s'", strcmd)

  if not reset_env:
    cmd_env = os.environ.copy()
    cmd_env["LC_ALL"] = "C"
  else:
    cmd_env = {}

  if env is not None:
    cmd_env.update(env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)
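
# Example usage (illustrative sketch): run a command and inspect the result;
# the command and path below are only examples.
#
#   result = RunCmd(["/bin/ls", "/var/log"])
#   if result.failed:
#     logging.error("ls failed (%s): %s", result.fail_reason, result.output)
#   else:
#     logging.debug("ls output: %s", result.stdout)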


def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,
                           cwd=cwd)

  child.stdin.close()
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
  out = StringIO()
  err = StringIO()
  fdmap = {
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    }
  for fd in fdmap:
    status = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, status | os.O_NONBLOCK)

  while fdmap:
    try:
      pollresult = poller.poll()
    except EnvironmentError, eerr:
      if eerr.errno == errno.EINTR:
        continue
      raise
    except select.error, serr:
      if serr[0] == errno.EINTR:
        continue
      raise

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        if not data:
          poller.unregister(fd)
          del fdmap[fd]
          continue
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)
        del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status


def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: int
  @return: the exit status

  """
  fh = open(output, "a")
  try:
    child = subprocess.Popen(cmd, shell=via_shell,
                             stderr=subprocess.STDOUT,
                             stdout=fh,
                             stdin=subprocess.PIPE,
                             close_fds=True, env=env,
                             cwd=cwd)

    child.stdin.close()
    status = child.wait()
  finally:
    fh.close()
  return status


def RunParts(dir_name, env=None, reset_env=False):
  """Run scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr
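
# Example usage (illustrative): run all executable hooks in a directory and
# log the ones that failed; the directory path is only an example.
#
#   for (relname, status, runresult) in RunParts("/etc/example/hooks.d"):
#     if status == constants.RUNPARTS_RUN and runresult.failed:
#       logging.warning("Hook %s failed: %s", relname, runresult.fail_reason)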


def GetSocketCredentials(sock):
  """Returns the credentials of the foreign process connected to a socket.

  @param sock: Unix socket
  @rtype: tuple; (number, number, number)
  @return: The PID, UID and GID of the connected foreign process.

  """
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
                             _STRUCT_UCRED_SIZE)
  return struct.unpack(_STRUCT_UCRED, peercred)


def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise


def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise


def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise


def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module is missing at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")


def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)

  fp = sha1()
  while True:
    data = f.read(4096)
    if not data:
      break

    fp.update(data)

  return fp.hexdigest()


def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    if cksum:
      ret[filename] = cksum

  return ret


def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype == constants.VTYPE_STRING:
      if not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
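
# Example usage (illustrative): coerce string values coming from user input
# into their declared types; the key names below are only examples.
#
#   data = {"port": "11000", "minmem": "512M"}
#   ForceDictType(data, {"port": constants.VTYPE_INT,
#                        "minmem": constants.VTYPE_SIZE})
#   # data == {"port": 11000, "minmem": 512}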


def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  proc_entry = "/proc/%d/status" % pid
  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5, args=[proc_entry])
  except RetryTimeout, err:
    err.RaiseInner()


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadFile(pidfile)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None
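
# Examples (illustrative) of the matching rules described in the docstring:
#
#   MatchNameComponent("test1", ["test1.example.com", "test2.example.com"])
#     -> "test1.example.com"
#   MatchNameComponent("test1", ["test1.example.com", "test1.example.org"])
#     -> None (ambiguous match)
#   MatchNameComponent("test1", ["test1", "test1.example.com"])
#     -> "test1" (exact match wins)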


class HostInfo:
  """Class implementing resolver and hostname functionality

  """
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")

  def __init__(self, name=None):
    """Initialize the host name object.

    If the name argument is not passed, it will use this system's
    name.

    """
    if name is None:
      name = self.SysName()

    self.query = name
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
    self.ip = self.ipaddrs[0]

  def ShortName(self):
    """Returns the hostname without domain.

    """
    return self.name.split('.')[0]

  @staticmethod
  def SysName():
    """Return the current system's name.

    This is simply a wrapper over C{socket.gethostname()}.

    """
    return socket.gethostname()

  @staticmethod
  def LookupHostname(hostname):
    """Look up hostname

    @type hostname: str
    @param hostname: hostname to look up

    @rtype: tuple
    @return: a tuple (name, aliases, ipaddrs) as returned by
        C{socket.gethostbyname_ex}
    @raise errors.ResolverError: in case of errors in resolving

    """
    try:
      result = socket.gethostbyname_ex(hostname)
    except socket.gaierror, err:
      # hostname not found in DNS
      raise errors.ResolverError(hostname, err.args[0], err.args[1])

    return result

  @classmethod
  def NormalizeName(cls, hostname):
    """Validate and normalize the given hostname.

    @attention: the validation is a bit more relaxed than the standards
        require; most importantly, we allow underscores in names
    @raise errors.OpPrereqError: when the name is not valid

    """
    hostname = hostname.lower()
    if (not cls._VALID_NAME_RE.match(hostname) or
        # double-dots, meaning empty label
        ".." in hostname or
        # empty initial label
        hostname.startswith(".")):
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
                                 errors.ECODE_INVAL)
    if hostname.endswith("."):
      hostname = hostname.rstrip(".")
    return hostname


def GetHostInfo(name=None):
  """Lookup host name and raise an OpPrereqError for failures"""

  try:
    return HostInfo(name)
  except errors.ResolverError, err:
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
                               (err[0], err[2]), errors.ECODE_RESOLVER)


def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)


def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")
  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    rval = int(val)
    return rval

  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
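
# Example (illustrative), matching the docstring above:
#
#   NiceSort(['a1', 'a10', 'a11', 'a2'])  ->  ['a1', 'a2', 'a10', 'a11']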


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv


def IsValidIP(ip):
  """Verifies the syntax of an IPv4 address.

  This function checks if the IPv4 address passed is valid or not based
  on syntax (not IP range, class calculations, etc.).

  @type ip: str
  @param ip: the address to be checked
  @rtype: a regular expression match object
  @return: a regular expression match object, or None if the
      address is not valid

  """
  unit = "(0|[1-9]\d{0,2})"
  #TODO: convert and return only boolean
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)


def IsValidShellParam(word):
  """Verifies if the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
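
# Examples (illustrative), using automatic scaling:
#
#   FormatUnit(512, 'h')      ->  '512M'
#   FormatUnit(1024, 'h')     ->  '1.0G'
#   FormatUnit(2097152, 'h')  ->  '2.0T'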


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
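
# Examples (illustrative): the result is always an integer number of MiB,
# rounded up to a multiple of 4.
#
#   ParseUnit("4G")    ->  4096
#   ParseUnit("1.3G")  ->  1332
#   ParseUnit("5")     ->  8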


def AddAuthorizedKey(file_name, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  f = open(file_name, 'a+')
  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if fields and not fields[0].startswith('#') and ip == fields[0]:
            continue
          out.write(line)

        out.write("%s\t%s" % (ip, hostname))
        if aliases:
          out.write(" %s" % ' '.join(aliases))
        out.write('\n')

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if len(fields) > 1 and not fields[0].startswith('#'):
            names = fields[1:]
            if hostname in names:
              while hostname in names:
                names.remove(hostname)
              if names:
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
              continue

          out.write(line)

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")


def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                 file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    return value
  else:
    return "'%s'" % value.replace("'", "'\\''")


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join([ShellQuote(i) for i in args])
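
# Example (illustrative):
#
#   ShellQuoteArgs(["echo", "-n", "hello world"])  ->  "echo -n 'hello world'"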


def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP or hostname to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored

  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error, (errcode, _):
      if errcode == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    success = False
  except socket.error, (errcode, _):
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)

  return success
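
# Example usage (illustrative); the address below is from the documentation
# range and only serves as an example:
#
#   if TcpPing("192.0.2.10", constants.DEFAULT_NODED_PORT, timeout=5,
#              live_port_needed=True):
#     logging.info("node daemon port is reachable")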


def OwnIpAddress(address):
  """Check if the current host has the given IP address.

  Currently this is done by TCP-pinging the address from the loopback
  address.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
                 source=constants.LOCALHOST_IP_ADDRESS)


def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  files.sort()
  return files


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning a hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: a hex representation of the pseudo-random sequence

  """
  return os.urandom(numbytes).encode('hex')


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    return f.read(size)
  finally:
    f.close()


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
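
# Example usage (illustrative); the path and contents below are only examples:
#
#   WriteFile("/tmp/example.conf", data="key = value\n", mode=0644,
#             backup=True)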


def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used
      return idx + base
  return None
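
# Examples (illustrative), matching the docstring above:
#
#   FirstFree([0, 1, 3])          ->  2
#   FirstFree([3, 4, 6], base=3)  ->  5
#   FirstFree([0, 1, 2])          ->  None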


def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None


class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout


def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result


def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  return [i for i in seq if i not in seen and not seen.add(i)]
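
# Example (illustrative):
#
#   UniqueSequence([1, 2, 1, 3, 2])  ->  [1, 2, 3]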


def NormalizeAndValidateMac(mac):
  """Normalizes and checks whether a MAC address is valid.

  Checks whether the supplied MAC address is formally correct; only
  the colon-separated format is accepted. The address is normalized
  to all-lowercase.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
  if not mac_check.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  return mac.lower()
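
# Example (illustrative):
#
#   NormalizeAndValidateMac("AA:00:56:3F:0B:1C")  ->  'aa:00:56:3f:0b:1c'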
1655

    
1656

    
1657
def TestDelay(duration):
1658
  """Sleep for a fixed amount of time.
1659

1660
  @type duration: float
1661
  @param duration: the sleep duration
1662
  @rtype: boolean
1663
  @return: False for negative value, True otherwise
1664

1665
  """
1666
  if duration < 0:
1667
    return False, "Invalid sleep duration"
1668
  time.sleep(duration)
1669
  return True, None
1670

    
1671

    
1672
def _CloseFDNoErr(fd, retries=5):
1673
  """Close a file descriptor ignoring errors.
1674

1675
  @type fd: int
1676
  @param fd: the file descriptor
1677
  @type retries: int
1678
  @param retries: how many retries to make, in case we get any
1679
      other error than EBADF
1680

1681
  """
1682
  try:
1683
    os.close(fd)
1684
  except OSError, err:
1685
    if err.errno != errno.EBADF:
1686
      if retries > 0:
1687
        _CloseFDNoErr(fd, retries - 1)
1688
    # else either it's closed already or we're out of retries, so we
1689
    # ignore this and go on
1690

    
1691

    
1692
def CloseFDs(noclose_fds=None):
1693
  """Close file descriptors.
1694

1695
  This closes all file descriptors above 2 (i.e. except
1696
  stdin/out/err).
1697

1698
  @type noclose_fds: list or None
1699
  @param noclose_fds: if given, it denotes a list of file descriptor
1700
      that should not be closed
1701

1702
  """
1703
  # Default maximum for the number of available file descriptors.
1704
  if 'SC_OPEN_MAX' in os.sysconf_names:
1705
    try:
1706
      MAXFD = os.sysconf('SC_OPEN_MAX')
1707
      if MAXFD < 0:
1708
        MAXFD = 1024
1709
    except OSError:
1710
      MAXFD = 1024
1711
  else:
1712
    MAXFD = 1024
1713
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
1714
  if (maxfd == resource.RLIM_INFINITY):
1715
    maxfd = MAXFD
1716

    
1717
  # Iterate through and close all file descriptors (except the standard ones)
1718
  for fd in range(3, maxfd):
1719
    if noclose_fds and fd in noclose_fds:
1720
      continue
1721
    _CloseFDNoErr(fd)
1722

    
1723

    
1724
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit
  UMASK = 077
  WORKDIR = "/"

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    os.setsid()
    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      os.chdir(WORKDIR)
      os.umask(UMASK)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    os._exit(0) # Exit parent of the first child.

  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0


def DaemonPidFileName(name):
  """Compute a ganeti pid file's absolute path.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pid = os.getpid()
  pidfilename = DaemonPidFileName(name)
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % pid)
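

# Illustrative sketch, not part of the upstream module: how the daemonization
# and pidfile helpers above are typically combined when a daemon starts.  The
# daemon name and log file path below are made-up examples.
def _ExampleDaemonStartup(name="ganeti-exampled",
                          logfile="/var/log/ganeti/exampled.log"):
  """Detaches from the terminal, writes a pidfile and configures logging."""
  Daemonize(logfile)
  WritePidFile(name)
  SetupLogging(logfile, debug=0, stderr_logging=False, program=name)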


def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    pass


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as a zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
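

# Illustrative sketch, not part of the upstream module: stopping a daemon by
# combining the pidfile helpers with L{KillProcess}.  The daemon name is a
# made-up example; ReadPidFile and IsProcessAlive are defined earlier in this
# module.
def _ExampleStopDaemon(name="ganeti-exampled"):
  """Sends SIGTERM (escalating to SIGKILL) to the daemon named C{name}."""
  pid = ReadPidFile(DaemonPidFileName(name))
  if pid > 0 and IsProcessAlive(pid):
    KillProcess(pid, signal_=signal.SIGTERM, timeout=30)
  RemovePidFile(name)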


def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem objects (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: list
  @param search_path: the directories in which to search
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None
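

# Illustrative usage, not part of the upstream module: looking up an external
# script in a list of directories.  The script name and directories are
# invented for the example and assume the name matches
# constants.EXT_PLUGIN_MASK.
def _ExampleFindHook():
  """Returns the full path of "my-hook" in one of two directories, or None."""
  return FindFile("my-hook", ["/etc/ganeti/hooks", "/usr/lib/ganeti/hooks"],
                  test=os.path.isfile)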


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None


def SplitTime(value):
  """Splits time as a floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)
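

# Illustrative example, not part of the upstream module: SplitTime and
# MergeTime are inverses of each other for values with microsecond precision.
def _ExampleSplitMergeRoundtrip():
  """Returns 1234.5: MergeTime(SplitTime(x)) recovers x."""
  (seconds, microseconds) = SplitTime(1234.5)    # -> (1234, 500000)
  return MergeTime((seconds, microseconds))      # -> 1234.5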


def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers; iterate over a copy, since
  # removeHandler modifies the list we would otherwise be iterating over
  for handler in root_logger.handlers[:]:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if False we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
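

# Illustrative call, not part of the upstream module: a typical logging setup
# for an interactive tool that should log to both a file and stderr.  The log
# file path and program name are made-up examples.
def _ExampleSetupCliLogging(debug=1):
  """Configures root logging for a command line tool run by hand."""
  SetupLogging("/var/log/ganeti/example-tool.log", debug=debug,
               stderr_logging=True, program="example-tool")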


def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This rejects things like /dir/../../other/path from being considered valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
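

# Illustrative example, not part of the upstream module: PathJoin accepts
# components that stay under the initial absolute prefix; something like
# PathJoin("/var/run", "../tmp/x") would raise ValueError instead.
def _ExamplePathJoin():
  """Returns "/var/run/ganeti/noded.pid"."""
  return PathJoin("/var/run/ganeti", "noded.pid")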


def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, which is too much, and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
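

# Illustrative example, not part of the upstream module: SafeEncode turns
# control and non-ASCII characters into printable escape sequences.
def _ExampleSafeEncode():
  """Returns the printable string "bad\\tvalue\\x07" for input with TAB/BEL."""
  return SafeEncode("bad\tvalue\x07")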


def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be part of an
  element. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
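

# Illustrative example, not part of the upstream module: the escaping rules of
# UnescapeAndSplit in practice.
def _ExampleUnescapeAndSplit():
  """Returns ["a,b", "c"]: the backslash-escaped comma does not split."""
  return UnescapeAndSplit("a\\,b,c")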


def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
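

# Illustrative usage, not part of the upstream module: evaluating a boolean
# check in a throw-away child process, so that crashes or leaked state do not
# affect the caller.  The checked path is a made-up example.
def _ExampleCheckInChild():
  """Returns True if /dev/null exists, evaluated in a forked child."""
  return RunInSeparateProcess(os.path.exists, "/dev/null")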


def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper


def LockFile(fd):
  """Locks a file using C{flock(2)}.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value


class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which were passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such an argument was an
  exception, the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)


class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout exception can be used to reraise it.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run; without an upper limit the delay keeps growing
    # by the configured factor
    if self._limit is None or self._next < self._limit:
      self._next = self._next * self._factor
      if self._limit is not None and self._next > self._limit:
        self._next = self._limit

    return current


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()


def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
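

# Illustrative sketch, not part of the upstream module: a typical Retry user
# that polls until a file appears.  The delay tuple means "start at 0.1s, grow
# by 1.5x per attempt, never wait more than 1s between attempts"; RetryTimeout
# is raised if the file does not show up within C{timeout} seconds.
def _ExampleWaitForFile(path, timeout=10.0):
  """Waits up to C{timeout} seconds for C{path} to appear and returns it."""
  def _Check():
    if not os.path.exists(path):
      raise RetryAgain()
    return path
  return Retry(_Check, (0.1, 1.5, 1.0), timeout)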


class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
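

# Illustrative usage, not part of the upstream module: protecting a critical
# section with an exclusive, time-limited file lock.  The lock file path is a
# made-up example.
def _ExampleWithFileLock():
  """Runs a dummy critical section under an exclusive lock."""
  lock = FileLock.Open("/var/lock/ganeti-example.lock")
  try:
    lock.Exclusive(blocking=True, timeout=10)
    # ... the code needing mutual exclusion would go here ...
  finally:
    lock.Close()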

    
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)


def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers

    """
    self.signum = set(signum)
    self.called = False

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  # we don't care about arguments, but we leave them named for the future
  def _HandleSignal(self, signum, frame): # pylint: disable-msg=W0613
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True
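

# Illustrative sketch, not part of the upstream module: a main loop that exits
# cleanly when SIGTERM or SIGINT arrives, using SignalHandler's "called" flag.
def _ExampleMainLoop(work_fn):
  """Calls C{work_fn} repeatedly until a termination signal is received."""
  handler = SignalHandler([signal.SIGTERM, signal.SIGINT])
  try:
    while not handler.called:
      work_fn()
  finally:
    handler.Reset()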


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
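

# Illustrative example, not part of the upstream module: a FieldSet mixing
# literal names with a regular expression, as used for field name validation.
def _ExampleFieldSet():
  """Returns ["foo"]: "name" and "disk.size/1" match the set, "foo" does not."""
  fields = FieldSet("name", "size", r"disk\.size/([0-9]+)")
  return fields.NonMatching(["name", "disk.size/1", "foo"])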