
root / lib / utils.py @ 1d466a4f


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import time
32
import subprocess
33
import re
34
import socket
35
import tempfile
36
import shutil
37
import errno
38
import pwd
39
import itertools
40
import select
41
import fcntl
42
import resource
43
import logging
44
import logging.handlers
45
import signal
46
import datetime
47
import calendar
48

    
49
from cStringIO import StringIO
50

    
51
try:
52
  from hashlib import sha1
53
except ImportError:
54
  import sha
55
  sha1 = sha.new
56

    
57
from ganeti import errors
58
from ganeti import constants
59

    
60

    
61
_locksheld = []
62
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
63

    
64
debug_locks = False
65

    
66
#: when set to True, L{RunCmd} is disabled
67
no_fork = False
68

    
69
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
70

    
71

    
72
class RunResult(object):
73
  """Holds the result of running external programs.
74

75
  @type exit_code: int
76
  @ivar exit_code: the exit code of the program, or None (if the program
77
      didn't exit())
78
  @type signal: int or None
79
  @ivar signal: the signal that caused the program to finish, or None
80
      (if the program wasn't terminated by a signal)
81
  @type stdout: str
82
  @ivar stdout: the standard output of the program
83
  @type stderr: str
84
  @ivar stderr: the standard error of the program
85
  @type failed: boolean
86
  @ivar failed: True in case the program was
87
      terminated by a signal or exited with a non-zero exit code
88
  @ivar fail_reason: a string detailing the termination reason
89

90
  """
91
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
92
               "failed", "fail_reason", "cmd"]
93

    
94

    
95
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
96
    self.cmd = cmd
97
    self.exit_code = exit_code
98
    self.signal = signal_
99
    self.stdout = stdout
100
    self.stderr = stderr
101
    self.failed = (signal_ is not None or exit_code != 0)
102

    
103
    if self.signal is not None:
104
      self.fail_reason = "terminated by signal %s" % self.signal
105
    elif self.exit_code is not None:
106
      self.fail_reason = "exited with exit code %s" % self.exit_code
107
    else:
108
      self.fail_reason = "unable to determine termination reason"
109

    
110
    if self.failed:
111
      logging.debug("Command '%s' failed (%s); output: %s",
112
                    self.cmd, self.fail_reason, self.output)
113

    
114
  def _GetOutput(self):
115
    """Returns the combined stdout and stderr for easier usage.
116

117
    """
118
    return self.stdout + self.stderr
119

    
120
  output = property(_GetOutput, None, None, "Return full output")
121

    
122

    
123
def RunCmd(cmd, env=None, output=None, cwd='/', reset_env=False):
124
  """Execute a (shell) command.
125

126
  The command should not read from its standard input, as it will be
127
  closed.
128

129
  @type cmd: string or list
130
  @param cmd: Command to run
131
  @type env: dict
132
  @param env: Additional environment
133
  @type output: str
134
  @param output: if desired, the output of the command can be
135
      saved in a file instead of the RunResult instance; this
136
      parameter denotes the file name (if not None)
137
  @type cwd: string
138
  @param cwd: if specified, will be used as the working
139
      directory for the command; the default will be /
140
  @type reset_env: boolean
141
  @param reset_env: whether to reset or keep the default os environment
142
  @rtype: L{RunResult}
143
  @return: RunResult instance
144
  @raise errors.ProgrammerError: if we call this when forks are disabled
145

146
  """
147
  if no_fork:
148
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
149

    
150
  if isinstance(cmd, list):
151
    cmd = [str(val) for val in cmd]
152
    strcmd = " ".join(cmd)
153
    shell = False
154
  else:
155
    strcmd = cmd
156
    shell = True
157
  logging.debug("RunCmd '%s'", strcmd)
158

    
159
  if not reset_env:
160
    cmd_env = os.environ.copy()
161
    cmd_env["LC_ALL"] = "C"
162
  else:
163
    cmd_env = {}
164

    
165
  if env is not None:
166
    cmd_env.update(env)
167

    
168
  try:
169
    if output is None:
170
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
171
    else:
172
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
173
      out = err = ""
174
  except OSError, err:
175
    if err.errno == errno.ENOENT:
176
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
177
                               (strcmd, err))
178
    else:
179
      raise
180

    
181
  if status >= 0:
182
    exitcode = status
183
    signal_ = None
184
  else:
185
    exitcode = None
186
    signal_ = -status
187

    
188
  return RunResult(exitcode, signal_, out, err, strcmd)
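# Illustrative usage sketch (not part of the original module): passing a list
# runs the command without a shell, while a string goes through the shell.
# The command below is only an example.
#   result = RunCmd(["/bin/ls", "/tmp"])
#   if result.failed:
#     logging.error("Command failed (%s): %s", result.fail_reason, result.output)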
189

    
190

    
191
def _RunCmdPipe(cmd, env, via_shell, cwd):
192
  """Run a command and return its output.
193

194
  @type  cmd: string or list
195
  @param cmd: Command to run
196
  @type env: dict
197
  @param env: The environment to use
198
  @type via_shell: bool
199
  @param via_shell: if we should run via the shell
200
  @type cwd: string
201
  @param cwd: the working directory for the program
202
  @rtype: tuple
203
  @return: (out, err, status)
204

205
  """
206
  poller = select.poll()
207
  child = subprocess.Popen(cmd, shell=via_shell,
208
                           stderr=subprocess.PIPE,
209
                           stdout=subprocess.PIPE,
210
                           stdin=subprocess.PIPE,
211
                           close_fds=True, env=env,
212
                           cwd=cwd)
213

    
214
  child.stdin.close()
215
  poller.register(child.stdout, select.POLLIN)
216
  poller.register(child.stderr, select.POLLIN)
217
  out = StringIO()
218
  err = StringIO()
219
  fdmap = {
220
    child.stdout.fileno(): (out, child.stdout),
221
    child.stderr.fileno(): (err, child.stderr),
222
    }
223
  for fd in fdmap:
224
    status = fcntl.fcntl(fd, fcntl.F_GETFL)
225
    fcntl.fcntl(fd, fcntl.F_SETFL, status | os.O_NONBLOCK)
226

    
227
  while fdmap:
228
    try:
229
      pollresult = poller.poll()
230
    except EnvironmentError, eerr:
231
      if eerr.errno == errno.EINTR:
232
        continue
233
      raise
234
    except select.error, serr:
235
      if serr[0] == errno.EINTR:
236
        continue
237
      raise
238

    
239
    for fd, event in pollresult:
240
      if event & select.POLLIN or event & select.POLLPRI:
241
        data = fdmap[fd][1].read()
242
        # no data from read signifies EOF (the same as POLLHUP)
243
        if not data:
244
          poller.unregister(fd)
245
          del fdmap[fd]
246
          continue
247
        fdmap[fd][0].write(data)
248
      if (event & select.POLLNVAL or event & select.POLLHUP or
249
          event & select.POLLERR):
250
        poller.unregister(fd)
251
        del fdmap[fd]
252

    
253
  out = out.getvalue()
254
  err = err.getvalue()
255

    
256
  status = child.wait()
257
  return out, err, status
258

    
259

    
260
def _RunCmdFile(cmd, env, via_shell, output, cwd):
261
  """Run a command and save its output to a file.
262

263
  @type  cmd: string or list
264
  @param cmd: Command to run
265
  @type env: dict
266
  @param env: The environment to use
267
  @type via_shell: bool
268
  @param via_shell: if we should run via the shell
269
  @type output: str
270
  @param output: the filename in which to save the output
271
  @type cwd: string
272
  @param cwd: the working directory for the program
273
  @rtype: int
274
  @return: the exit status
275

276
  """
277
  fh = open(output, "a")
278
  try:
279
    child = subprocess.Popen(cmd, shell=via_shell,
280
                             stderr=subprocess.STDOUT,
281
                             stdout=fh,
282
                             stdin=subprocess.PIPE,
283
                             close_fds=True, env=env,
284
                             cwd=cwd)
285

    
286
    child.stdin.close()
287
    status = child.wait()
288
  finally:
289
    fh.close()
290
  return status
291

    
292

    
293
def RunParts(dir_name, env=None, reset_env=False):
294
  """Run scripts or programs in a directory.
295

296
  @type dir_name: string
297
  @param dir_name: absolute path to a directory
298
  @type env: dict
299
  @param env: The environment to use
300
  @type reset_env: boolean
301
  @param reset_env: whether to reset or keep the default os environment
302
  @rtype: list of tuples
303
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
304

305
  """
306
  rr = []
307

    
308
  try:
309
    dir_contents = ListVisibleFiles(dir_name)
310
  except OSError, err:
311
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
312
    return rr
313

    
314
  for relname in sorted(dir_contents):
315
    fname = PathJoin(dir_name, relname)
316
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
317
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
318
      rr.append((relname, constants.RUNPARTS_SKIP, None))
319
    else:
320
      try:
321
        result = RunCmd([fname], env=env, reset_env=reset_env)
322
      except Exception, err: # pylint: disable-msg=W0703
323
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
324
      else:
325
        rr.append((relname, constants.RUNPARTS_RUN, result))
326

    
327
  return rr
328

    
329

    
330
def RemoveFile(filename):
331
  """Remove a file ignoring some errors.
332

333
  Remove a file, ignoring non-existing ones or directories. Other
334
  errors are passed.
335

336
  @type filename: str
337
  @param filename: the file to be removed
338

339
  """
340
  try:
341
    os.unlink(filename)
342
  except OSError, err:
343
    if err.errno not in (errno.ENOENT, errno.EISDIR):
344
      raise
345

    
346

    
347
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
348
  """Renames a file.
349

350
  @type old: string
351
  @param old: Original path
352
  @type new: string
353
  @param new: New path
354
  @type mkdir: bool
355
  @param mkdir: Whether to create target directory if it doesn't exist
356
  @type mkdir_mode: int
357
  @param mkdir_mode: Mode for newly created directories
358

359
  """
360
  try:
361
    return os.rename(old, new)
362
  except OSError, err:
363
    # In at least one use case of this function, the job queue, directory
364
    # creation is very rare. Checking for the directory before renaming is not
365
    # as efficient.
366
    if mkdir and err.errno == errno.ENOENT:
367
      # Create directory and try again
368
      dirname = os.path.dirname(new)
369
      try:
370
        os.makedirs(dirname, mode=mkdir_mode)
371
      except OSError, err:
372
        # Ignore EEXIST. This is only handled in os.makedirs as included in
373
        # Python 2.5 and above.
374
        if err.errno != errno.EEXIST or not os.path.exists(dirname):
375
          raise
376

    
377
      return os.rename(old, new)
378

    
379
    raise
380

    
381

    
382
def ResetTempfileModule():
383
  """Resets the random name generator of the tempfile module.
384

385
  This function should be called after C{os.fork} in the child process to
386
  ensure it creates a newly seeded random generator. Otherwise it would
387
  generate the same random parts as the parent process. If several processes
388
  race for the creation of a temporary file, this could lead to one not getting
389
  a temporary name.
390

391
  """
392
  # pylint: disable-msg=W0212
393
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
394
    tempfile._once_lock.acquire()
395
    try:
396
      # Reset random name generator
397
      tempfile._name_sequence = None
398
    finally:
399
      tempfile._once_lock.release()
400
  else:
401
    logging.critical("The tempfile module is missing at least one of the"
402
                     " '_once_lock' and '_name_sequence' attributes")
403

    
404

    
405
def _FingerprintFile(filename):
406
  """Compute the fingerprint of a file.
407

408
  If the file does not exist, None will be returned
409
  instead.
410

411
  @type filename: str
412
  @param filename: the filename to checksum
413
  @rtype: str
414
  @return: the hex digest of the SHA-1 checksum of the contents
415
      of the file
416

417
  """
418
  if not (os.path.exists(filename) and os.path.isfile(filename)):
419
    return None
420

    
421
  f = open(filename)
422

    
423
  fp = sha1()
424
  while True:
425
    data = f.read(4096)
426
    if not data:
427
      break
428

    
429
    fp.update(data)
430

    
431
  return fp.hexdigest()
432

    
433

    
434
def FingerprintFiles(files):
435
  """Compute fingerprints for a list of files.
436

437
  @type files: list
438
  @param files: the list of filenames to fingerprint
439
  @rtype: dict
440
  @return: a dictionary filename: fingerprint, holding only
441
      existing files
442

443
  """
444
  ret = {}
445

    
446
  for filename in files:
447
    cksum = _FingerprintFile(filename)
448
    if cksum:
449
      ret[filename] = cksum
450

    
451
  return ret
452

    
453

    
454
def ForceDictType(target, key_types, allowed_values=None):
455
  """Force the values of a dict to have certain types.
456

457
  @type target: dict
458
  @param target: the dict to update
459
  @type key_types: dict
460
  @param key_types: dict mapping target dict keys to types
461
                    in constants.ENFORCEABLE_TYPES
462
  @type allowed_values: list
463
  @keyword allowed_values: list of specially allowed values
464

465
  """
466
  if allowed_values is None:
467
    allowed_values = []
468

    
469
  if not isinstance(target, dict):
470
    msg = "Expected dictionary, got '%s'" % target
471
    raise errors.TypeEnforcementError(msg)
472

    
473
  for key in target:
474
    if key not in key_types:
475
      msg = "Unknown key '%s'" % key
476
      raise errors.TypeEnforcementError(msg)
477

    
478
    if target[key] in allowed_values:
479
      continue
480

    
481
    ktype = key_types[key]
482
    if ktype not in constants.ENFORCEABLE_TYPES:
483
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
484
      raise errors.ProgrammerError(msg)
485

    
486
    if ktype == constants.VTYPE_STRING:
487
      if not isinstance(target[key], basestring):
488
        if isinstance(target[key], bool) and not target[key]:
489
          target[key] = ''
490
        else:
491
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
492
          raise errors.TypeEnforcementError(msg)
493
    elif ktype == constants.VTYPE_BOOL:
494
      if isinstance(target[key], basestring) and target[key]:
495
        if target[key].lower() == constants.VALUE_FALSE:
496
          target[key] = False
497
        elif target[key].lower() == constants.VALUE_TRUE:
498
          target[key] = True
499
        else:
500
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
501
          raise errors.TypeEnforcementError(msg)
502
      elif target[key]:
503
        target[key] = True
504
      else:
505
        target[key] = False
506
    elif ktype == constants.VTYPE_SIZE:
507
      try:
508
        target[key] = ParseUnit(target[key])
509
      except errors.UnitParseError, err:
510
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
511
              (key, target[key], err)
512
        raise errors.TypeEnforcementError(msg)
513
    elif ktype == constants.VTYPE_INT:
514
      try:
515
        target[key] = int(target[key])
516
      except (ValueError, TypeError):
517
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
518
        raise errors.TypeEnforcementError(msg)
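# Illustrative usage sketch (not part of the original module); the keys and
# values below are made up, and it assumes constants.VALUE_TRUE is "true":
#   data = {"memory": "128", "auto_balance": "true"}
#   ForceDictType(data, {"memory": constants.VTYPE_INT,
#                        "auto_balance": constants.VTYPE_BOOL})
#   # data is now {"memory": 128, "auto_balance": True}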
519

    
520

    
521
def IsProcessAlive(pid):
522
  """Check if a given pid exists on the system.
523

524
  @note: zombie status is not handled, so zombie processes
525
      will be returned as alive
526
  @type pid: int
527
  @param pid: the process ID to check
528
  @rtype: boolean
529
  @return: True if the process exists
530

531
  """
532
  if pid <= 0:
533
    return False
534

    
535
  try:
536
    os.stat("/proc/%d/status" % pid)
537
    return True
538
  except EnvironmentError, err:
539
    if err.errno in (errno.ENOENT, errno.ENOTDIR):
540
      return False
541
    raise
542

    
543

    
544
def ReadPidFile(pidfile):
545
  """Read a pid from a file.
546

547
  @type  pidfile: string
548
  @param pidfile: path to the file containing the pid
549
  @rtype: int
550
  @return: The process id, if the file exists and contains a valid PID,
551
           otherwise 0
552

553
  """
554
  try:
555
    raw_data = ReadFile(pidfile)
556
  except EnvironmentError, err:
557
    if err.errno != errno.ENOENT:
558
      logging.exception("Can't read pid file")
559
    return 0
560

    
561
  try:
562
    pid = int(raw_data)
563
  except (TypeError, ValueError), err:
564
    logging.info("Can't parse pid file contents", exc_info=True)
565
    return 0
566

    
567
  return pid
568

    
569

    
570
def MatchNameComponent(key, name_list, case_sensitive=True):
571
  """Try to match a name against a list.
572

573
  This function will try to match a name like test1 against a list
574
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
575
  this list, I{'test1'} as well as I{'test1.example'} will match, but
576
  not I{'test1.ex'}. A multiple match will be considered as no match
577
  at all (e.g. I{'test1'} against C{['test1.example.com',
578
  'test1.example.org']}), except when the key fully matches an entry
579
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
580

581
  @type key: str
582
  @param key: the name to be searched
583
  @type name_list: list
584
  @param name_list: the list of strings against which to search the key
585
  @type case_sensitive: boolean
586
  @param case_sensitive: whether to provide a case-sensitive match
587

588
  @rtype: None or str
589
  @return: None if there is no match I{or} if there are multiple matches,
590
      otherwise the element from the list which matches
591

592
  """
593
  if key in name_list:
594
    return key
595

    
596
  re_flags = 0
597
  if not case_sensitive:
598
    re_flags |= re.IGNORECASE
599
    key = key.upper()
600
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
601
  names_filtered = []
602
  string_matches = []
603
  for name in name_list:
604
    if mo.match(name) is not None:
605
      names_filtered.append(name)
606
      if not case_sensitive and key == name.upper():
607
        string_matches.append(name)
608

    
609
  if len(string_matches) == 1:
610
    return string_matches[0]
611
  if len(names_filtered) == 1:
612
    return names_filtered[0]
613
  return None
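# Illustrative examples, restating the docstring above:
#   MatchNameComponent('test1', ['test1.example.com', 'test2.example.com'])
#     => 'test1.example.com'
#   MatchNameComponent('test1', ['test1.example.com', 'test1.example.org'])
#     => None (multiple matches)
#   MatchNameComponent('test1', ['test1', 'test1.example.com'])
#     => 'test1' (full match wins)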
614

    
615

    
616
class HostInfo:
617
  """Class implementing resolver and hostname functionality
618

619
  """
620
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")
621

    
622
  def __init__(self, name=None):
623
    """Initialize the host name object.
624

625
    If the name argument is not passed, it will use this system's
626
    name.
627

628
    """
629
    if name is None:
630
      name = self.SysName()
631

    
632
    self.query = name
633
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
634
    self.ip = self.ipaddrs[0]
635

    
636
  def ShortName(self):
637
    """Returns the hostname without domain.
638

639
    """
640
    return self.name.split('.')[0]
641

    
642
  @staticmethod
643
  def SysName():
644
    """Return the current system's name.
645

646
    This is simply a wrapper over C{socket.gethostname()}.
647

648
    """
649
    return socket.gethostname()
650

    
651
  @staticmethod
652
  def LookupHostname(hostname):
653
    """Look up hostname
654

655
    @type hostname: str
656
    @param hostname: hostname to look up
657

658
    @rtype: tuple
659
    @return: a tuple (name, aliases, ipaddrs) as returned by
660
        C{socket.gethostbyname_ex}
661
    @raise errors.ResolverError: in case of errors in resolving
662

663
    """
664
    try:
665
      result = socket.gethostbyname_ex(hostname)
666
    except socket.gaierror, err:
667
      # hostname not found in DNS
668
      raise errors.ResolverError(hostname, err.args[0], err.args[1])
669

    
670
    return result
671

    
672
  @classmethod
673
  def NormalizeName(cls, hostname):
674
    """Validate and normalize the given hostname.
675

676
    @attention: the validation is a bit more relaxed than the standards
677
        require; most importantly, we allow underscores in names
678
    @raise errors.OpPrereqError: when the name is not valid
679

680
    """
681
    hostname = hostname.lower()
682
    if (not cls._VALID_NAME_RE.match(hostname) or
683
        # double-dots, meaning empty label
684
        ".." in hostname or
685
        # empty initial label
686
        hostname.startswith(".")):
687
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
688
                                 errors.ECODE_INVAL)
689
    if hostname.endswith("."):
690
      hostname = hostname.rstrip(".")
691
    return hostname
692

    
693

    
694
def GetHostInfo(name=None):
695
  """Lookup host name and raise an OpPrereqError for failures"""
696

    
697
  try:
698
    return HostInfo(name)
699
  except errors.ResolverError, err:
700
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
701
                               (err[0], err[2]), errors.ECODE_RESOLVER)
702

    
703

    
704
def ListVolumeGroups():
705
  """List volume groups and their size
706

707
  @rtype: dict
708
  @return:
709
       Dictionary with keys volume name and values
710
       the size of the volume
711

712
  """
713
  command = "vgs --noheadings --units m --nosuffix -o name,size"
714
  result = RunCmd(command)
715
  retval = {}
716
  if result.failed:
717
    return retval
718

    
719
  for line in result.stdout.splitlines():
720
    try:
721
      name, size = line.split()
722
      size = int(float(size))
723
    except (IndexError, ValueError), err:
724
      logging.error("Invalid output from vgs (%s): %s", err, line)
725
      continue
726

    
727
    retval[name] = size
728

    
729
  return retval
730

    
731

    
732
def BridgeExists(bridge):
733
  """Check whether the given bridge exists in the system
734

735
  @type bridge: str
736
  @param bridge: the bridge name to check
737
  @rtype: boolean
738
  @return: True if it does
739

740
  """
741
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
742

    
743

    
744
def NiceSort(name_list):
745
  """Sort a list of strings based on digit and non-digit groupings.
746

747
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
748
  will sort the list in the logical order C{['a1', 'a2', 'a10',
749
  'a11']}.
750

751
  The sort algorithm breaks each name in groups of either only-digits
752
  or no-digits. Only the first eight such groups are considered, and
753
  after that we just use what's left of the string.
754

755
  @type name_list: list
756
  @param name_list: the names to be sorted
757
  @rtype: list
758
  @return: a copy of the name list sorted with our algorithm
759

760
  """
761
  _SORTER_BASE = "(\D+|\d+)"
762
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
763
                                                  _SORTER_BASE, _SORTER_BASE,
764
                                                  _SORTER_BASE, _SORTER_BASE,
765
                                                  _SORTER_BASE, _SORTER_BASE)
766
  _SORTER_RE = re.compile(_SORTER_FULL)
767
  _SORTER_NODIGIT = re.compile("^\D*$")
768
  def _TryInt(val):
769
    """Attempts to convert a variable to integer."""
770
    if val is None or _SORTER_NODIGIT.match(val):
771
      return val
772
    rval = int(val)
773
    return rval
774

    
775
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
776
             for name in name_list]
777
  to_sort.sort()
778
  return [tup[1] for tup in to_sort]
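# Illustrative example, restating the docstring above:
#   NiceSort(['a1', 'a10', 'a11', 'a2'])  =>  ['a1', 'a2', 'a10', 'a11']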
779

    
780

    
781
def TryConvert(fn, val):
782
  """Try to convert a value ignoring errors.
783

784
  This function tries to apply function I{fn} to I{val}. If no
785
  C{ValueError} or C{TypeError} exceptions are raised, it will return
786
  the result, else it will return the original value. Any other
787
  exceptions are propagated to the caller.
788

789
  @type fn: callable
790
  @param fn: function to apply to the value
791
  @param val: the value to be converted
792
  @return: The converted value if the conversion was successful,
793
      otherwise the original value.
794

795
  """
796
  try:
797
    nv = fn(val)
798
  except (ValueError, TypeError):
799
    nv = val
800
  return nv
801

    
802

    
803
def IsValidIP(ip):
804
  """Verifies the syntax of an IPv4 address.
805

806
  This function checks if the IPv4 address passed is valid or not based
807
  on syntax (not IP range, class calculations, etc.).
808

809
  @type ip: str
810
  @param ip: the address to be checked
811
  @rtype: a regular expression match object
812
  @return: a regular expression match object, or None if the
813
      address is not valid
814

815
  """
816
  unit = "(0|[1-9]\d{0,2})"
817
  #TODO: convert and return only boolean
818
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
819

    
820

    
821
def IsValidShellParam(word):
822
  """Verifies if the given word is safe from the shell's point of view.
823

824
  This means that we can pass this to a command via the shell and be
825
  sure that it doesn't alter the command line and is passed as such to
826
  the actual command.
827

828
  Note that we are overly restrictive here, in order to be on the safe
829
  side.
830

831
  @type word: str
832
  @param word: the word to check
833
  @rtype: boolean
834
  @return: True if the word is 'safe'
835

836
  """
837
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
838

    
839

    
840
def BuildShellCmd(template, *args):
841
  """Build a safe shell command line from the given arguments.
842

843
  This function will check all arguments in the args list so that they
844
  are valid shell parameters (i.e. they don't contain shell
845
  metacharacters). If everything is ok, it will return the result of
846
  template % args.
847

848
  @type template: str
849
  @param template: the string holding the template for the
850
      string formatting
851
  @rtype: str
852
  @return: the expanded command line
853

854
  """
855
  for word in args:
856
    if not IsValidShellParam(word):
857
      raise errors.ProgrammerError("Shell argument '%s' contains"
858
                                   " invalid characters" % word)
859
  return template % args
860

    
861

    
862
def FormatUnit(value, units):
863
  """Formats an incoming number of MiB with the appropriate unit.
864

865
  @type value: int
866
  @param value: integer representing the value in MiB (1048576)
867
  @type units: char
868
  @param units: the type of formatting we should do:
869
      - 'h' for automatic scaling
870
      - 'm' for MiBs
871
      - 'g' for GiBs
872
      - 't' for TiBs
873
  @rtype: str
874
  @return: the formatted value (with suffix)
875

876
  """
877
  if units not in ('m', 'g', 't', 'h'):
878
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
879

    
880
  suffix = ''
881

    
882
  if units == 'm' or (units == 'h' and value < 1024):
883
    if units == 'h':
884
      suffix = 'M'
885
    return "%d%s" % (round(value, 0), suffix)
886

    
887
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
888
    if units == 'h':
889
      suffix = 'G'
890
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
891

    
892
  else:
893
    if units == 'h':
894
      suffix = 'T'
895
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
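# Illustrative examples (not part of the original module); values are MiB:
#   FormatUnit(512, 'h')   => '512M'
#   FormatUnit(1536, 'h')  => '1.5G'
#   FormatUnit(1536, 'm')  => '1536'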
896

    
897

    
898
def ParseUnit(input_string):
899
  """Tries to extract number and scale from the given string.
900

901
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
902
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
903
  is always an int in MiB.
904

905
  """
906
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
907
  if not m:
908
    raise errors.UnitParseError("Invalid format")
909

    
910
  value = float(m.groups()[0])
911

    
912
  unit = m.groups()[1]
913
  if unit:
914
    lcunit = unit.lower()
915
  else:
916
    lcunit = 'm'
917

    
918
  if lcunit in ('m', 'mb', 'mib'):
919
    # Value already in MiB
920
    pass
921

    
922
  elif lcunit in ('g', 'gb', 'gib'):
923
    value *= 1024
924

    
925
  elif lcunit in ('t', 'tb', 'tib'):
926
    value *= 1024 * 1024
927

    
928
  else:
929
    raise errors.UnitParseError("Unknown unit: %s" % unit)
930

    
931
  # Make sure we round up
932
  if int(value) < value:
933
    value += 1
934

    
935
  # Round up to the next multiple of 4
936
  value = int(value)
937
  if value % 4:
938
    value += 4 - value % 4
939

    
940
  return value
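# Illustrative examples (not part of the original module); results are MiB,
# rounded up to a multiple of 4:
#   ParseUnit("4")     => 4
#   ParseUnit("5")     => 8
#   ParseUnit("0.5g")  => 512
#   ParseUnit("1.3G")  => 1332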
941

    
942

    
943
def AddAuthorizedKey(file_name, key):
944
  """Adds an SSH public key to an authorized_keys file.
945

946
  @type file_name: str
947
  @param file_name: path to authorized_keys file
948
  @type key: str
949
  @param key: string containing key
950

951
  """
952
  key_fields = key.split()
953

    
954
  f = open(file_name, 'a+')
955
  try:
956
    nl = True
957
    for line in f:
958
      # Ignore whitespace changes
959
      if line.split() == key_fields:
960
        break
961
      nl = line.endswith('\n')
962
    else:
963
      if not nl:
964
        f.write("\n")
965
      f.write(key.rstrip('\r\n'))
966
      f.write("\n")
967
      f.flush()
968
  finally:
969
    f.close()
970

    
971

    
972
def RemoveAuthorizedKey(file_name, key):
973
  """Removes an SSH public key from an authorized_keys file.
974

975
  @type file_name: str
976
  @param file_name: path to authorized_keys file
977
  @type key: str
978
  @param key: string containing key
979

980
  """
981
  key_fields = key.split()
982

    
983
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
984
  try:
985
    out = os.fdopen(fd, 'w')
986
    try:
987
      f = open(file_name, 'r')
988
      try:
989
        for line in f:
990
          # Ignore whitespace changes while comparing lines
991
          if line.split() != key_fields:
992
            out.write(line)
993

    
994
        out.flush()
995
        os.rename(tmpname, file_name)
996
      finally:
997
        f.close()
998
    finally:
999
      out.close()
1000
  except:
1001
    RemoveFile(tmpname)
1002
    raise
1003

    
1004

    
1005
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1006
  """Sets the hostname (and its aliases) for an IP address in /etc/hosts.
1007

1008
  @type file_name: str
1009
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1010
  @type ip: str
1011
  @param ip: the IP address
1012
  @type hostname: str
1013
  @param hostname: the hostname to be added
1014
  @type aliases: list
1015
  @param aliases: the list of aliases to add for the hostname
1016

1017
  """
1018
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1019
  # Ensure aliases are unique
1020
  aliases = UniqueSequence([hostname] + aliases)[1:]
1021

    
1022
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1023
  try:
1024
    out = os.fdopen(fd, 'w')
1025
    try:
1026
      f = open(file_name, 'r')
1027
      try:
1028
        for line in f:
1029
          fields = line.split()
1030
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1031
            continue
1032
          out.write(line)
1033

    
1034
        out.write("%s\t%s" % (ip, hostname))
1035
        if aliases:
1036
          out.write(" %s" % ' '.join(aliases))
1037
        out.write('\n')
1038

    
1039
        out.flush()
1040
        os.fsync(out)
1041
        os.chmod(tmpname, 0644)
1042
        os.rename(tmpname, file_name)
1043
      finally:
1044
        f.close()
1045
    finally:
1046
      out.close()
1047
  except:
1048
    RemoveFile(tmpname)
1049
    raise
1050

    
1051

    
1052
def AddHostToEtcHosts(hostname):
1053
  """Wrapper around SetEtcHostsEntry.
1054

1055
  @type hostname: str
1056
  @param hostname: a hostname that will be resolved and added to
1057
      L{constants.ETC_HOSTS}
1058

1059
  """
1060
  hi = HostInfo(name=hostname)
1061
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1062

    
1063

    
1064
def RemoveEtcHostsEntry(file_name, hostname):
1065
  """Removes a hostname from /etc/hosts.
1066

1067
  IP addresses without names are removed from the file.
1068

1069
  @type file_name: str
1070
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1071
  @type hostname: str
1072
  @param hostname: the hostname to be removed
1073

1074
  """
1075
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1076
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1077
  try:
1078
    out = os.fdopen(fd, 'w')
1079
    try:
1080
      f = open(file_name, 'r')
1081
      try:
1082
        for line in f:
1083
          fields = line.split()
1084
          if len(fields) > 1 and not fields[0].startswith('#'):
1085
            names = fields[1:]
1086
            if hostname in names:
1087
              while hostname in names:
1088
                names.remove(hostname)
1089
              if names:
1090
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1091
              continue
1092

    
1093
          out.write(line)
1094

    
1095
        out.flush()
1096
        os.fsync(out)
1097
        os.chmod(tmpname, 0644)
1098
        os.rename(tmpname, file_name)
1099
      finally:
1100
        f.close()
1101
    finally:
1102
      out.close()
1103
  except:
1104
    RemoveFile(tmpname)
1105
    raise
1106

    
1107

    
1108
def RemoveHostFromEtcHosts(hostname):
1109
  """Wrapper around RemoveEtcHostsEntry.
1110

1111
  @type hostname: str
1112
  @param hostname: hostname that will be resolved and its
1113
      full and short names will be removed from
1114
      L{constants.ETC_HOSTS}
1115

1116
  """
1117
  hi = HostInfo(name=hostname)
1118
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
1119
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
1120

    
1121

    
1122
def TimestampForFilename():
1123
  """Returns the current time formatted for filenames.
1124

1125
  The format doesn't contain colons, as some shells and applications use them as
1126
  separators.
1127

1128
  """
1129
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1130

    
1131

    
1132
def CreateBackup(file_name):
1133
  """Creates a backup of a file.
1134

1135
  @type file_name: str
1136
  @param file_name: file to be backed up
1137
  @rtype: str
1138
  @return: the path to the newly created backup
1139
  @raise errors.ProgrammerError: for invalid file names
1140

1141
  """
1142
  if not os.path.isfile(file_name):
1143
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1144
                                file_name)
1145

    
1146
  prefix = ("%s.backup-%s." %
1147
            (os.path.basename(file_name), TimestampForFilename()))
1148
  dir_name = os.path.dirname(file_name)
1149

    
1150
  fsrc = open(file_name, 'rb')
1151
  try:
1152
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1153
    fdst = os.fdopen(fd, 'wb')
1154
    try:
1155
      logging.debug("Backing up %s at %s", file_name, backup_name)
1156
      shutil.copyfileobj(fsrc, fdst)
1157
    finally:
1158
      fdst.close()
1159
  finally:
1160
    fsrc.close()
1161

    
1162
  return backup_name
1163

    
1164

    
1165
def ShellQuote(value):
1166
  """Quotes shell argument according to POSIX.
1167

1168
  @type value: str
1169
  @param value: the argument to be quoted
1170
  @rtype: str
1171
  @return: the quoted value
1172

1173
  """
1174
  if _re_shell_unquoted.match(value):
1175
    return value
1176
  else:
1177
    return "'%s'" % value.replace("'", "'\\''")
1178

    
1179

    
1180
def ShellQuoteArgs(args):
1181
  """Quotes a list of shell arguments.
1182

1183
  @type args: list
1184
  @param args: list of arguments to be quoted
1185
  @rtype: str
1186
  @return: the quoted arguments concatenated with spaces
1187

1188
  """
1189
  return ' '.join([ShellQuote(i) for i in args])
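# Illustrative examples (not part of the original module):
#   ShellQuote("my file")                    => "'my file'"
#   ShellQuoteArgs(["ls", "-l", "my file"])  => "ls -l 'my file'"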
1190

    
1191

    
1192
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
1193
  """Simple ping implementation using TCP connect(2).
1194

1195
  Check if the given IP is reachable by attempting a TCP connect
1196
  to it.
1197

1198
  @type target: str
1199
  @param target: the IP or hostname to ping
1200
  @type port: int
1201
  @param port: the port to connect to
1202
  @type timeout: int
1203
  @param timeout: the timeout on the connection attempt
1204
  @type live_port_needed: boolean
1205
  @param live_port_needed: whether a closed port will cause the
1206
      function to return failure, as if there was a timeout
1207
  @type source: str or None
1208
  @param source: if specified, will cause the connect to be made
1209
      from this specific source address; failures to bind other
1210
      than C{EADDRNOTAVAIL} will be ignored
1211

1212
  """
1213
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1214

    
1215
  success = False
1216

    
1217
  if source is not None:
1218
    try:
1219
      sock.bind((source, 0))
1220
    except socket.error, (errcode, _):
1221
      if errcode == errno.EADDRNOTAVAIL:
1222
        success = False
1223

    
1224
  sock.settimeout(timeout)
1225

    
1226
  try:
1227
    sock.connect((target, port))
1228
    sock.close()
1229
    success = True
1230
  except socket.timeout:
1231
    success = False
1232
  except socket.error, (errcode, _):
1233
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)
1234

    
1235
  return success
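# Illustrative usage sketch (not part of the original module); 192.0.2.1 is a
# made-up documentation address:
#   if TcpPing("192.0.2.1", 22, timeout=5, live_port_needed=True):
#     logging.info("host is reachable and the port is open")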
1236

    
1237

    
1238
def OwnIpAddress(address):
1239
  """Check if the current host has the given IP address.
1240

1241
  Currently this is done by TCP-pinging the address from the loopback
1242
  address.
1243

1244
  @type address: string
1245
  @param address: the address to check
1246
  @rtype: bool
1247
  @return: True if we own the address
1248

1249
  """
1250
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
1251
                 source=constants.LOCALHOST_IP_ADDRESS)
1252

    
1253

    
1254
def ListVisibleFiles(path):
1255
  """Returns a list of visible files in a directory.
1256

1257
  @type path: str
1258
  @param path: the directory to enumerate
1259
  @rtype: list
1260
  @return: the list of all files not starting with a dot
1261
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1262

1263
  """
1264
  if not IsNormAbsPath(path):
1265
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1266
                                 " absolute/normalized: '%s'" % path)
1267
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1268
  files.sort()
1269
  return files
1270

    
1271

    
1272
def GetHomeDir(user, default=None):
1273
  """Try to get the homedir of the given user.
1274

1275
  The user can be passed either as a string (denoting the name) or as
1276
  an integer (denoting the user id). If the user is not found, the
1277
  'default' argument is returned, which defaults to None.
1278

1279
  """
1280
  try:
1281
    if isinstance(user, basestring):
1282
      result = pwd.getpwnam(user)
1283
    elif isinstance(user, (int, long)):
1284
      result = pwd.getpwuid(user)
1285
    else:
1286
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1287
                                   type(user))
1288
  except KeyError:
1289
    return default
1290
  return result.pw_dir
1291

    
1292

    
1293
def NewUUID():
1294
  """Returns a random UUID.
1295

1296
  @note: This is a Linux-specific method as it uses the /proc
1297
      filesystem.
1298
  @rtype: str
1299

1300
  """
1301
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1302

    
1303

    
1304
def GenerateSecret(numbytes=20):
1305
  """Generates a random secret.
1306

1307
  This will generate a pseudo-random secret returning a hex string
1308
  (so that it can be used where an ASCII string is needed).
1309

1310
  @param numbytes: the number of bytes which will be represented by the returned
1311
      string (defaulting to 20, the length of a SHA1 hash)
1312
  @rtype: str
1313
  @return: a hex representation of the pseudo-random sequence
1314

1315
  """
1316
  return os.urandom(numbytes).encode('hex')
1317

    
1318

    
1319
def EnsureDirs(dirs):
1320
  """Make required directories, if they don't exist.
1321

1322
  @param dirs: list of tuples (dir_name, dir_mode)
1323
  @type dirs: list of (string, integer)
1324

1325
  """
1326
  for dir_name, dir_mode in dirs:
1327
    try:
1328
      os.mkdir(dir_name, dir_mode)
1329
    except EnvironmentError, err:
1330
      if err.errno != errno.EEXIST:
1331
        raise errors.GenericError("Cannot create needed directory"
1332
                                  " '%s': %s" % (dir_name, err))
1333
    if not os.path.isdir(dir_name):
1334
      raise errors.GenericError("%s is not a directory" % dir_name)
1335

    
1336

    
1337
def ReadFile(file_name, size=-1):
1338
  """Reads a file.
1339

1340
  @type size: int
1341
  @param size: Read at most size bytes (if negative, entire file)
1342
  @rtype: str
1343
  @return: the (possibly partial) content of the file
1344

1345
  """
1346
  f = open(file_name, "r")
1347
  try:
1348
    return f.read(size)
1349
  finally:
1350
    f.close()
1351

    
1352

    
1353
def WriteFile(file_name, fn=None, data=None,
1354
              mode=None, uid=-1, gid=-1,
1355
              atime=None, mtime=None, close=True,
1356
              dry_run=False, backup=False,
1357
              prewrite=None, postwrite=None):
1358
  """(Over)write a file atomically.
1359

1360
  The file_name and either fn (a function taking one argument, the
1361
  file descriptor, and which should write the data to it) or data (the
1362
  contents of the file) must be passed. The other arguments are
1363
  optional and allow setting the file mode, owner and group, and the
1364
  mtime/atime of the file.
1365

1366
  If the function doesn't raise an exception, it has succeeded and the
1367
  target file has the new contents. If the function has raised an
1368
  exception, an existing target file should be unmodified and the
1369
  temporary file should be removed.
1370

1371
  @type file_name: str
1372
  @param file_name: the target filename
1373
  @type fn: callable
1374
  @param fn: content writing function, called with
1375
      file descriptor as parameter
1376
  @type data: str
1377
  @param data: contents of the file
1378
  @type mode: int
1379
  @param mode: file mode
1380
  @type uid: int
1381
  @param uid: the owner of the file
1382
  @type gid: int
1383
  @param gid: the group of the file
1384
  @type atime: int
1385
  @param atime: a custom access time to be set on the file
1386
  @type mtime: int
1387
  @param mtime: a custom modification time to be set on the file
1388
  @type close: boolean
1389
  @param close: whether to close file after writing it
1390
  @type prewrite: callable
1391
  @param prewrite: function to be called before writing content
1392
  @type postwrite: callable
1393
  @param postwrite: function to be called after writing content
1394

1395
  @rtype: None or int
1396
  @return: None if the 'close' parameter evaluates to True,
1397
      otherwise the file descriptor
1398

1399
  @raise errors.ProgrammerError: if any of the arguments are not valid
1400

1401
  """
1402
  if not os.path.isabs(file_name):
1403
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1404
                                 " absolute: '%s'" % file_name)
1405

    
1406
  if [fn, data].count(None) != 1:
1407
    raise errors.ProgrammerError("fn or data required")
1408

    
1409
  if [atime, mtime].count(None) == 1:
1410
    raise errors.ProgrammerError("Both atime and mtime must be either"
1411
                                 " set or None")
1412

    
1413
  if backup and not dry_run and os.path.isfile(file_name):
1414
    CreateBackup(file_name)
1415

    
1416
  dir_name, base_name = os.path.split(file_name)
1417
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1418
  do_remove = True
1419
  # here we need to make sure we remove the temp file, if any error
1420
  # leaves it in place
1421
  try:
1422
    if uid != -1 or gid != -1:
1423
      os.chown(new_name, uid, gid)
1424
    if mode:
1425
      os.chmod(new_name, mode)
1426
    if callable(prewrite):
1427
      prewrite(fd)
1428
    if data is not None:
1429
      os.write(fd, data)
1430
    else:
1431
      fn(fd)
1432
    if callable(postwrite):
1433
      postwrite(fd)
1434
    os.fsync(fd)
1435
    if atime is not None and mtime is not None:
1436
      os.utime(new_name, (atime, mtime))
1437
    if not dry_run:
1438
      os.rename(new_name, file_name)
1439
      do_remove = False
1440
  finally:
1441
    if close:
1442
      os.close(fd)
1443
      result = None
1444
    else:
1445
      result = fd
1446
    if do_remove:
1447
      RemoveFile(new_name)
1448

    
1449
  return result
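# Illustrative usage sketch (not part of the original module); the path and
# contents are made up:
#   WriteFile("/tmp/example.conf", data="key = value\n", mode=0644)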
1450

    
1451

    
1452
def FirstFree(seq, base=0):
1453
  """Returns the first non-existing integer from seq.
1454

1455
  The seq argument should be a sorted list of positive integers. The
1456
  first time the index of an element is smaller than the element
1457
  value, the index will be returned.
1458

1459
  The base argument is used to start at a different offset,
1460
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1461

1462
  Example: C{[0, 1, 3]} will return I{2}.
1463

1464
  @type seq: sequence
1465
  @param seq: the sequence to be analyzed.
1466
  @type base: int
1467
  @param base: use this value as the base index of the sequence
1468
  @rtype: int
1469
  @return: the first non-used index in the sequence
1470

1471
  """
1472
  for idx, elem in enumerate(seq):
1473
    assert elem >= base, "Passed element is higher than base offset"
1474
    if elem > idx + base:
1475
      # idx is not used
1476
      return idx + base
1477
  return None
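# Illustrative examples, restating the docstring above:
#   FirstFree([0, 1, 3])          => 2
#   FirstFree([3, 4, 6], base=3)  => 5
#   FirstFree([0, 1, 2])          => None (no gap in the sequence)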
1478

    
1479

    
1480
def all(seq, pred=bool): # pylint: disable-msg=W0622
1481
  "Returns True if pred(x) is True for every element in the iterable"
1482
  for _ in itertools.ifilterfalse(pred, seq):
1483
    return False
1484
  return True
1485

    
1486

    
1487
def any(seq, pred=bool): # pylint: disable-msg=W0622
1488
  "Returns True if pred(x) is True for at least one element in the iterable"
1489
  for _ in itertools.ifilter(pred, seq):
1490
    return True
1491
  return False
1492

    
1493

    
1494
def partition(seq, pred=bool): # # pylint: disable-msg=W0622
1495
  "Partition a list in two, based on the given predicate"
1496
  return (list(itertools.ifilter(pred, seq)),
1497
          list(itertools.ifilterfalse(pred, seq)))
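# Illustrative examples (not part of the original module):
#   partition([1, 0, 2, 0, 3])            => ([1, 2, 3], [0, 0])
#   partition(range(5), lambda x: x % 2)  => ([1, 3], [0, 2, 4])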
1498

    
1499

    
1500
def UniqueSequence(seq):
1501
  """Returns a list with unique elements.
1502

1503
  Element order is preserved.
1504

1505
  @type seq: sequence
1506
  @param seq: the sequence with the source elements
1507
  @rtype: list
1508
  @return: list of unique elements from seq
1509

1510
  """
1511
  seen = set()
1512
  return [i for i in seq if i not in seen and not seen.add(i)]
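# Illustrative example (not part of the original module); the order of first
# occurrence is preserved:
#   UniqueSequence([1, 2, 1, 3, 2])  => [1, 2, 3]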
1513

    
1514

    
1515
def NormalizeAndValidateMac(mac):
1516
  """Normalizes and checks if a MAC address is valid.
1517

1518
  Checks whether the supplied MAC address is formally correct, only
1519
  accepts the colon-separated format. The address is normalized to lowercase.
1520

1521
  @type mac: str
1522
  @param mac: the MAC to be validated
1523
  @rtype: str
1524
  @return: returns the normalized and validated MAC.
1525

1526
  @raise errors.OpPrereqError: If the MAC isn't valid
1527

1528
  """
1529
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
1530
  if not mac_check.match(mac):
1531
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
1532
                               mac, errors.ECODE_INVAL)
1533

    
1534
  return mac.lower()
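# Illustrative examples (not part of the original module):
#   NormalizeAndValidateMac("AA:00:11:22:33:44")  => "aa:00:11:22:33:44"
#   NormalizeAndValidateMac("aa-00-11-22-33-44")  raises errors.OpPrereqError,
#     since only the colon-separated format is accepted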
1535

    
1536

    
1537
def TestDelay(duration):
1538
  """Sleep for a fixed amount of time.
1539

1540
  @type duration: float
1541
  @param duration: the sleep duration
1542
  @rtype: tuple
1543
  @return: (False, error message) if the duration is negative, (True, None) otherwise
1544

1545
  """
1546
  if duration < 0:
1547
    return False, "Invalid sleep duration"
1548
  time.sleep(duration)
1549
  return True, None
1550

    
1551

    
1552
def _CloseFDNoErr(fd, retries=5):
1553
  """Close a file descriptor ignoring errors.
1554

1555
  @type fd: int
1556
  @param fd: the file descriptor
1557
  @type retries: int
1558
  @param retries: how many retries to make, in case we get any
1559
      other error than EBADF
1560

1561
  """
1562
  try:
1563
    os.close(fd)
1564
  except OSError, err:
1565
    if err.errno != errno.EBADF:
1566
      if retries > 0:
1567
        _CloseFDNoErr(fd, retries - 1)
1568
    # else either it's closed already or we're out of retries, so we
1569
    # ignore this and go on
1570

    
1571

    
1572
def CloseFDs(noclose_fds=None):
1573
  """Close file descriptors.
1574

1575
  This closes all file descriptors above 2 (i.e. except
1576
  stdin/out/err).
1577

1578
  @type noclose_fds: list or None
1579
  @param noclose_fds: if given, it denotes a list of file descriptor
1580
      that should not be closed
1581

1582
  """
1583
  # Default maximum for the number of available file descriptors.
1584
  if 'SC_OPEN_MAX' in os.sysconf_names:
1585
    try:
1586
      MAXFD = os.sysconf('SC_OPEN_MAX')
1587
      if MAXFD < 0:
1588
        MAXFD = 1024
1589
    except OSError:
1590
      MAXFD = 1024
1591
  else:
1592
    MAXFD = 1024
1593
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
1594
  if (maxfd == resource.RLIM_INFINITY):
1595
    maxfd = MAXFD
1596

    
1597
  # Iterate through and close all file descriptors (except the standard ones)
1598
  for fd in range(3, maxfd):
1599
    if noclose_fds and fd in noclose_fds:
1600
      continue
1601
    _CloseFDNoErr(fd)
1602

    
1603

    
1604
def Daemonize(logfile):
1605
  """Daemonize the current process.
1606

1607
  This detaches the current process from the controlling terminal and
1608
  runs it in the background as a daemon.
1609

1610
  @type logfile: str
1611
  @param logfile: the logfile to which we should redirect stdout/stderr
1612
  @rtype: int
1613
  @return: the value zero
1614

1615
  """
1616
  # pylint: disable-msg=W0212
1617
  # yes, we really want os._exit
1618
  UMASK = 077
1619
  WORKDIR = "/"
1620

    
1621
  # this might fail
1622
  pid = os.fork()
1623
  if (pid == 0):  # The first child.
1624
    os.setsid()
1625
    # this might fail
1626
    pid = os.fork() # Fork a second child.
1627
    if (pid == 0):  # The second child.
1628
      os.chdir(WORKDIR)
1629
      os.umask(UMASK)
1630
    else:
1631
      # exit() or _exit()?  See below.
1632
      os._exit(0) # Exit parent (the first child) of the second child.
1633
  else:
1634
    os._exit(0) # Exit parent of the first child.
1635

    
1636
  for fd in range(3):
1637
    _CloseFDNoErr(fd)
1638
  i = os.open("/dev/null", os.O_RDONLY) # stdin
1639
  assert i == 0, "Can't close/reopen stdin"
1640
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
1641
  assert i == 1, "Can't close/reopen stdout"
1642
  # Duplicate standard output to standard error.
1643
  os.dup2(1, 2)
1644
  return 0
1645

    
1646

    
1647
def DaemonPidFileName(name):
1648
  """Compute a Ganeti pid file's absolute path.
1649

1650
  @type name: str
1651
  @param name: the daemon name
1652
  @rtype: str
1653
  @return: the full path to the pidfile corresponding to the given
1654
      daemon name
1655

1656
  """
1657
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
1658

    
1659

    
1660
def EnsureDaemon(name):
1661
  """Check for and start daemon if not alive.
1662

1663
  """
1664
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
1665
  if result.failed:
1666
    logging.error("Can't start daemon '%s', failure %s, output: %s",
1667
                  name, result.fail_reason, result.output)
1668
    return False
1669

    
1670
  return True
1671

    
1672

    
1673
def WritePidFile(name):
1674
  """Write the current process pidfile.
1675

1676
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
1677

1678
  @type name: str
1679
  @param name: the daemon name to use
1680
  @raise errors.GenericError: if the pid file already exists and
1681
      points to a live process
1682

1683
  """
1684
  pid = os.getpid()
1685
  pidfilename = DaemonPidFileName(name)
1686
  if IsProcessAlive(ReadPidFile(pidfilename)):
1687
    raise errors.GenericError("%s contains a live process" % pidfilename)
1688

    
1689
  WriteFile(pidfilename, data="%d\n" % pid)
1690

    
1691

    
1692
def RemovePidFile(name):
1693
  """Remove the current process pidfile.
1694

1695
  Any errors are ignored.
1696

1697
  @type name: str
1698
  @param name: the daemon name used to derive the pidfile name
1699

1700
  """
1701
  pidfilename = DaemonPidFileName(name)
1702
  # TODO: we could check here that the file contains our pid
1703
  try:
1704
    RemoveFile(pidfilename)
1705
  except: # pylint: disable-msg=W0702
1706
    pass
1707

    
1708

    
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)


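# Example use of KillProcess (illustrative only; `child_pid` stands for a PID
# obtained elsewhere, e.g. via ReadPidFile on a daemon pidfile):
#
#   KillProcess(child_pid, signal_=signal.SIGTERM, timeout=10, waitpid=False)
#
# This sends SIGTERM and, if the process is still alive after 10 seconds,
# escalates to SIGKILL; waitpid=True should only be passed for our own
# children, as documented above.

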
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: list of str
  @param search_path: the directories in which to search for the name
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None


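# Example use of CheckVolumeGroupSize (illustrative only; the volume group
# name and sizes below are made up):
#
#   err = CheckVolumeGroupSize({"xenvg": 20480}, "xenvg", 10240)
#   # err is None on success, otherwise a human-readable error message

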
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)


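# Example for SplitTime/MergeTime (illustrative only): the two functions are
# inverses of each other up to microsecond precision:
#
#   SplitTime(1234.5)           # -> (1234, 500000)
#   MergeTime((1234, 500000))   # -> 1234.5

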
def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not set up or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if False we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise


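# Example use of SetupLogging (illustrative only; the log file path and
# program name are placeholders): a daemon would typically configure logging
# early in its startup, before doing any real work:
#
#   SetupLogging("/var/log/ganeti/example.log", debug=1,
#                stderr_logging=True, program="example-daemon")

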
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This prevents things like /dir/../../other/path from being considered
  valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result


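# Example use of PathJoin (illustrative only): unlike os.path.join, only
# normalized, non-backtracking components are accepted:
#
#   PathJoin("/tmp", "subdir", "file")   # -> "/tmp/subdir/file"
#   PathJoin("/tmp", "../other")         # raises ValueError

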
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @rtype: tuple
  @return: (not_before, not_after) tuple; each element is either None or
      a timestamp in seconds since the epoch

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' error handler, which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since it escapes single quotes and
  backslashes too, and that is too much; also, that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu


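# Example use of SafeEncode (illustrative only): control characters and
# non-ASCII input are reduced to a plain ASCII representation:
#
#   SafeEncode("foo\tbar\n")   # -> "foo\\tbar\\n"
#   SafeEncode("caf\xe9")      # -> "caf\\xe9"

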
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to appear inside an
  element. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist


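# Example use of UnescapeAndSplit (illustrative only): a backslash protects
# the separator, a doubled backslash stands for itself:
#
#   UnescapeAndSplit("a,b\\,c,d")   # -> ["a", "b,c", "d"]
#   UnescapeAndSplit("a\\\\,b")     # -> ["a\\", "b"]

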
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)


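# Example use of RunInSeparateProcess (illustrative only): run a check that
# might misbehave (e.g. leak file descriptors or change global state) in a
# throw-away child process; `_CheckSomething` is a hypothetical callable:
#
#   def _CheckSomething():
#     return os.path.exists("/proc/drbd")
#
#   ok = RunInSeparateProcess(_CheckSomething)

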
def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper


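# Example use of LockedMethod (illustrative only): the decorated method relies
# on the instance having a `_lock` attribute (e.g. a threading.Lock, assuming
# threading is imported); `_Counter` is a hypothetical example class:
#
#   class _Counter(object):
#     def __init__(self):
#       self._lock = threading.Lock()
#       self.value = 0
#
#     @LockedMethod
#     def Increment(self):
#       self.value += 1

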
def LockFile(fd):
  """Locks a file using an exclusive, non-blocking flock lock.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value


class RetryTimeout(Exception):
  """Retry loop timed out.

  """


class RetryAgain(Exception):
  """Retry again.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run; the limit is only applied when one was actually
    # given ("min(None, ...)" would otherwise evaluate to None)
    if self._limit is None:
      self._next = self._next * self._factor
    elif self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()


def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type args: list
  @param args: Positional arguments for the function
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain:
      pass

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      raise RetryTimeout()

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)


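# Example use of Retry (illustrative only): poll for a condition with an
# increasing delay, starting at 0.1s, growing by a factor of 1.5, capped at
# 2.0s, for at most 30 seconds overall; `_CheckReady` and the flag file path
# are hypothetical:
#
#   def _CheckReady():
#     if not os.path.exists("/var/run/example.ready"):
#       raise RetryAgain()
#
#   Retry(_CheckReady, (0.1, 1.5, 2.0), 30)

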
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)


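# Example use of FileLock (illustrative only; the lock file path and the
# `DoSomethingCritical` call are placeholders):
#
#   lock = FileLock.Open("/var/lock/example.lock")
#   lock.Exclusive(blocking=True)
#   try:
#     DoSomethingCritical()
#   finally:
#     lock.Unlock()
#     lock.Close()

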
def SignalHandled(signums):
  """Signal-handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when the instance is
  deleted or when L{Reset} is called. The L{called} attribute can be
  queried to detect whether any of the handled signals was received.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers

    """
    self.signum = set(signum)
    self.called = False

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  # we don't care about arguments, but we leave them named for the future
  def _HandleSignal(self, signum, frame): # pylint: disable-msg=W0613
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True


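# Example use of SignalHandler (illustrative only): a main loop that wants to
# exit cleanly on SIGTERM can poll the handler's `called` flag;
# `DoOneIteration` is a hypothetical work function:
#
#   handler = SignalHandler([signal.SIGTERM])
#   try:
#     while not handler.called:
#       DoOneIteration()
#   finally:
#     handler.Reset()

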
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
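

# Example use of FieldSet (illustrative only): a field set mixing literal
# names and regular expressions:
#
#   fields = FieldSet("name", "ip", "nic\.count/\d+")
#   fields.Matches("nic.count/2")        # -> a match object
#   fields.NonMatching(["name", "foo"])  # -> ["foo"]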