#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import datetime
import calendar

from cStringIO import StringIO

try:
  from hashlib import sha1
except ImportError:
  import sha
  sha1 = sha.new

from ganeti import errors
from ganeti import constants


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"


class RunResult(object):
73
  """Holds the result of running external programs.
74

75
  @type exit_code: int
76
  @ivar exit_code: the exit code of the program, or None (if the program
77
      didn't exit())
78
  @type signal: int or None
79
  @ivar signal: the signal that caused the program to finish, or None
80
      (if the program wasn't terminated by a signal)
81
  @type stdout: str
82
  @ivar stdout: the standard output of the program
83
  @type stderr: str
84
  @ivar stderr: the standard error of the program
85
  @type failed: boolean
86
  @ivar failed: True in case the program was
87
      terminated by a signal or exited with a non-zero exit code
88
  @ivar fail_reason: a string detailing the termination reason
89

90
  """
91
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
92
               "failed", "fail_reason", "cmd"]
93

    
94

    
95
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
96
    self.cmd = cmd
97
    self.exit_code = exit_code
98
    self.signal = signal_
99
    self.stdout = stdout
100
    self.stderr = stderr
101
    self.failed = (signal_ is not None or exit_code != 0)
102

    
103
    if self.signal is not None:
104
      self.fail_reason = "terminated by signal %s" % self.signal
105
    elif self.exit_code is not None:
106
      self.fail_reason = "exited with exit code %s" % self.exit_code
107
    else:
108
      self.fail_reason = "unable to determine termination reason"
109

    
110
    if self.failed:
111
      logging.debug("Command '%s' failed (%s); output: %s",
112
                    self.cmd, self.fail_reason, self.output)
113

    
114
  def _GetOutput(self):
115
    """Returns the combined stdout and stderr for easier usage.
116

117
    """
118
    return self.stdout + self.stderr
119

    
120
  output = property(_GetOutput, None, None, "Return full output")
121

    
122

    
123
def RunCmd(cmd, env=None, output=None, cwd='/', reset_env=False):
124
  """Execute a (shell) command.
125

126
  The command should not read from its standard input, as it will be
127
  closed.
128

129
  @type cmd: string or list
130
  @param cmd: Command to run
131
  @type env: dict
132
  @param env: Additional environment
133
  @type output: str
134
  @param output: if desired, the output of the command can be
135
      saved in a file instead of the RunResult instance; this
136
      parameter denotes the file name (if not None)
137
  @type cwd: string
138
  @param cwd: if specified, will be used as the working
139
      directory for the command; the default will be /
140
  @type reset_env: boolean
141
  @param reset_env: whether to reset or keep the default os environment
142
  @rtype: L{RunResult}
143
  @return: RunResult instance
144
  @raise errors.ProgrammerError: if we call this when forks are disabled
145

146
  """
147
  if no_fork:
148
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
149

    
150
  if isinstance(cmd, list):
151
    cmd = [str(val) for val in cmd]
152
    strcmd = " ".join(cmd)
153
    shell = False
154
  else:
155
    strcmd = cmd
156
    shell = True
157
  logging.debug("RunCmd '%s'", strcmd)
158

    
159
  if not reset_env:
160
    cmd_env = os.environ.copy()
161
    cmd_env["LC_ALL"] = "C"
162
  else:
163
    cmd_env = {}
164

    
165
  if env is not None:
166
    cmd_env.update(env)
167

    
168
  try:
169
    if output is None:
170
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
171
    else:
172
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
173
      out = err = ""
174
  except OSError, err:
175
    if err.errno == errno.ENOENT:
176
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
177
                               (strcmd, err))
178
    else:
179
      raise
180

    
181
  if status >= 0:
182
    exitcode = status
183
    signal_ = None
184
  else:
185
    exitcode = None
186
    signal_ = -status
187

    
188
  return RunResult(exitcode, signal_, out, err, strcmd)
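# Editor's note: the following is an illustrative usage sketch, not part of
# the original module. RunCmd accepts either a list (executed directly,
# without a shell) or a string (executed via the shell) and always returns a
# RunResult object:
#
#   result = RunCmd(["/bin/ls", "-l", "/tmp"])
#   if result.failed:
#     logging.error("ls failed (%s): %s", result.fail_reason, result.output)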
189

    
190

    
191
def _RunCmdPipe(cmd, env, via_shell, cwd):
192
  """Run a command and return its output.
193

194
  @type  cmd: string or list
195
  @param cmd: Command to run
196
  @type env: dict
197
  @param env: The environment to use
198
  @type via_shell: bool
199
  @param via_shell: if we should run via the shell
200
  @type cwd: string
201
  @param cwd: the working directory for the program
202
  @rtype: tuple
203
  @return: (out, err, status)
204

205
  """
206
  poller = select.poll()
207
  child = subprocess.Popen(cmd, shell=via_shell,
208
                           stderr=subprocess.PIPE,
209
                           stdout=subprocess.PIPE,
210
                           stdin=subprocess.PIPE,
211
                           close_fds=True, env=env,
212
                           cwd=cwd)
213

    
214
  child.stdin.close()
215
  poller.register(child.stdout, select.POLLIN)
216
  poller.register(child.stderr, select.POLLIN)
217
  out = StringIO()
218
  err = StringIO()
219
  fdmap = {
220
    child.stdout.fileno(): (out, child.stdout),
221
    child.stderr.fileno(): (err, child.stderr),
222
    }
223
  for fd in fdmap:
224
    status = fcntl.fcntl(fd, fcntl.F_GETFL)
225
    fcntl.fcntl(fd, fcntl.F_SETFL, status | os.O_NONBLOCK)
226

    
227
  while fdmap:
228
    try:
229
      pollresult = poller.poll()
230
    except EnvironmentError, eerr:
231
      if eerr.errno == errno.EINTR:
232
        continue
233
      raise
234
    except select.error, serr:
235
      if serr[0] == errno.EINTR:
236
        continue
237
      raise
238

    
239
    for fd, event in pollresult:
240
      if event & select.POLLIN or event & select.POLLPRI:
241
        data = fdmap[fd][1].read()
242
        # no data from read signifies EOF (the same as POLLHUP)
243
        if not data:
244
          poller.unregister(fd)
245
          del fdmap[fd]
246
          continue
247
        fdmap[fd][0].write(data)
248
      if (event & select.POLLNVAL or event & select.POLLHUP or
249
          event & select.POLLERR):
250
        poller.unregister(fd)
251
        del fdmap[fd]
252

    
253
  out = out.getvalue()
254
  err = err.getvalue()
255

    
256
  status = child.wait()
257
  return out, err, status
258

    
259

    
260
def _RunCmdFile(cmd, env, via_shell, output, cwd):
261
  """Run a command and save its output to a file.
262

263
  @type  cmd: string or list
264
  @param cmd: Command to run
265
  @type env: dict
266
  @param env: The environment to use
267
  @type via_shell: bool
268
  @param via_shell: if we should run via the shell
269
  @type output: str
270
  @param output: the filename in which to save the output
271
  @type cwd: string
272
  @param cwd: the working directory for the program
273
  @rtype: int
274
  @return: the exit status
275

276
  """
277
  fh = open(output, "a")
278
  try:
279
    child = subprocess.Popen(cmd, shell=via_shell,
280
                             stderr=subprocess.STDOUT,
281
                             stdout=fh,
282
                             stdin=subprocess.PIPE,
283
                             close_fds=True, env=env,
284
                             cwd=cwd)
285

    
286
    child.stdin.close()
287
    status = child.wait()
288
  finally:
289
    fh.close()
290
  return status
291

    
292

    
293
def RunParts(dir_name, env=None, reset_env=False):
294
  """Run Scripts or programs in a directory
295

296
  @type dir_name: string
297
  @param dir_name: absolute path to a directory
298
  @type env: dict
299
  @param env: The environment to use
300
  @type reset_env: boolean
301
  @param reset_env: whether to reset or keep the default os environment
302
  @rtype: list of tuples
303
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
304

305
  """
306
  rr = []
307

    
308
  try:
309
    dir_contents = ListVisibleFiles(dir_name)
310
  except OSError, err:
311
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
312
    return rr
313

    
314
  for relname in sorted(dir_contents):
315
    fname = PathJoin(dir_name, relname)
316
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
317
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
318
      rr.append((relname, constants.RUNPARTS_SKIP, None))
319
    else:
320
      try:
321
        result = RunCmd([fname], env=env, reset_env=reset_env)
322
      except Exception, err: # pylint: disable-msg=W0703
323
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
324
      else:
325
        rr.append((relname, constants.RUNPARTS_RUN, result))
326

    
327
  return rr
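# Editor's note: illustrative sketch, not part of the original module; the
# directory path below is hypothetical. Every entry is reported as skipped,
# run or errored:
#
#   for (name, status, runresult) in RunParts("/etc/ganeti/hooks.d"):
#     if status == constants.RUNPARTS_RUN and runresult.failed:
#       logging.warning("Script %s failed: %s", name, runresult.output)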
328

    
329

    
330
def RemoveFile(filename):
331
  """Remove a file ignoring some errors.
332

333
  Remove a file, ignoring non-existing ones or directories. Other
334
  errors are passed.
335

336
  @type filename: str
337
  @param filename: the file to be removed
338

339
  """
340
  try:
341
    os.unlink(filename)
342
  except OSError, err:
343
    if err.errno not in (errno.ENOENT, errno.EISDIR):
344
      raise
345

    
346

    
347
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
348
  """Renames a file.
349

350
  @type old: string
351
  @param old: Original path
352
  @type new: string
353
  @param new: New path
354
  @type mkdir: bool
355
  @param mkdir: Whether to create target directory if it doesn't exist
356
  @type mkdir_mode: int
357
  @param mkdir_mode: Mode for newly created directories
358

359
  """
360
  try:
361
    return os.rename(old, new)
362
  except OSError, err:
363
    # In at least one use case of this function, the job queue, directory
364
    # creation is very rare. Checking for the directory before renaming is not
365
    # as efficient.
366
    if mkdir and err.errno == errno.ENOENT:
367
      # Create directory and try again
368
      dirname = os.path.dirname(new)
369
      try:
370
        os.makedirs(dirname, mode=mkdir_mode)
371
      except OSError, err:
372
        # Ignore EEXIST. This is only handled in os.makedirs as included in
373
        # Python 2.5 and above.
374
        if err.errno != errno.EEXIST or not os.path.exists(dirname):
375
          raise
376

    
377
      return os.rename(old, new)
378

    
379
    raise
380

    
381

    
382
def ResetTempfileModule():
383
  """Resets the random name generator of the tempfile module.
384

385
  This function should be called after C{os.fork} in the child process to
386
  ensure it creates a newly seeded random generator. Otherwise it would
387
  generate the same random parts as the parent process. If several processes
388
  race for the creation of a temporary file, this could lead to one not getting
389
  a temporary name.
390

391
  """
392
  # pylint: disable-msg=W0212
393
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
394
    tempfile._once_lock.acquire()
395
    try:
396
      # Reset random name generator
397
      tempfile._name_sequence = None
398
    finally:
399
      tempfile._once_lock.release()
400
  else:
401
    logging.critical("The tempfile module misses at least one of the"
402
                     " '_once_lock' and '_name_sequence' attributes")
403

    
404

    
405
def _FingerprintFile(filename):
406
  """Compute the fingerprint of a file.
407

408
  If the file does not exist, None will be returned
409
  instead.
410

411
  @type filename: str
412
  @param filename: the filename to checksum
413
  @rtype: str
414
  @return: the hex digest of the sha checksum of the contents
415
      of the file
416

417
  """
418
  if not (os.path.exists(filename) and os.path.isfile(filename)):
419
    return None
420

    
421
  f = open(filename)
422

    
423
  fp = sha1()
424
  while True:
425
    data = f.read(4096)
426
    if not data:
427
      break
428

    
429
    fp.update(data)
430

    
431
  return fp.hexdigest()
432

    
433

    
434
def FingerprintFiles(files):
435
  """Compute fingerprints for a list of files.
436

437
  @type files: list
438
  @param files: the list of filenames to fingerprint
439
  @rtype: dict
440
  @return: a dictionary filename: fingerprint, holding only
441
      existing files
442

443
  """
444
  ret = {}
445

    
446
  for filename in files:
447
    cksum = _FingerprintFile(filename)
448
    if cksum:
449
      ret[filename] = cksum
450

    
451
  return ret
452

    
453

    
454
def ForceDictType(target, key_types, allowed_values=None):
455
  """Force the values of a dict to have certain types.
456

457
  @type target: dict
458
  @param target: the dict to update
459
  @type key_types: dict
460
  @param key_types: dict mapping target dict keys to types
461
                    in constants.ENFORCEABLE_TYPES
462
  @type allowed_values: list
463
  @keyword allowed_values: list of specially allowed values
464

465
  """
466
  if allowed_values is None:
467
    allowed_values = []
468

    
469
  if not isinstance(target, dict):
470
    msg = "Expected dictionary, got '%s'" % target
471
    raise errors.TypeEnforcementError(msg)
472

    
473
  for key in target:
474
    if key not in key_types:
475
      msg = "Unknown key '%s'" % key
476
      raise errors.TypeEnforcementError(msg)
477

    
478
    if target[key] in allowed_values:
479
      continue
480

    
481
    ktype = key_types[key]
482
    if ktype not in constants.ENFORCEABLE_TYPES:
483
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
484
      raise errors.ProgrammerError(msg)
485

    
486
    if ktype == constants.VTYPE_STRING:
487
      if not isinstance(target[key], basestring):
488
        if isinstance(target[key], bool) and not target[key]:
489
          target[key] = ''
490
        else:
491
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
492
          raise errors.TypeEnforcementError(msg)
493
    elif ktype == constants.VTYPE_BOOL:
494
      if isinstance(target[key], basestring) and target[key]:
495
        if target[key].lower() == constants.VALUE_FALSE:
496
          target[key] = False
497
        elif target[key].lower() == constants.VALUE_TRUE:
498
          target[key] = True
499
        else:
500
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
501
          raise errors.TypeEnforcementError(msg)
502
      elif target[key]:
503
        target[key] = True
504
      else:
505
        target[key] = False
506
    elif ktype == constants.VTYPE_SIZE:
507
      try:
508
        target[key] = ParseUnit(target[key])
509
      except errors.UnitParseError, err:
510
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
511
              (key, target[key], err)
512
        raise errors.TypeEnforcementError(msg)
513
    elif ktype == constants.VTYPE_INT:
514
      try:
515
        target[key] = int(target[key])
516
      except (ValueError, TypeError):
517
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
518
        raise errors.TypeEnforcementError(msg)
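# Editor's note: illustrative sketch, not part of the original module; the
# key name below is made up. ForceDictType converts values in place:
#
#   params = {"memory": "512M"}
#   ForceDictType(params, {"memory": constants.VTYPE_SIZE})
#   # params["memory"] is now the integer 512 (MiB)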
519

    
520

    
521
def IsProcessAlive(pid):
522
  """Check if a given pid exists on the system.
523

524
  @note: zombie status is not handled, so zombie processes
525
      will be returned as alive
526
  @type pid: int
527
  @param pid: the process ID to check
528
  @rtype: boolean
529
  @return: True if the process exists
530

531
  """
532
  if pid <= 0:
533
    return False
534

    
535
  try:
536
    os.stat("/proc/%d/status" % pid)
537
    return True
538
  except EnvironmentError, err:
539
    if err.errno in (errno.ENOENT, errno.ENOTDIR):
540
      return False
541
    raise
542

    
543

    
544
def ReadPidFile(pidfile):
545
  """Read a pid from a file.
546

547
  @type  pidfile: string
548
  @param pidfile: path to the file containing the pid
549
  @rtype: int
550
  @return: The process id, if the file exists and contains a valid PID,
551
           otherwise 0
552

553
  """
554
  try:
555
    raw_data = ReadFile(pidfile)
556
  except EnvironmentError, err:
557
    if err.errno != errno.ENOENT:
558
      logging.exception("Can't read pid file")
559
    return 0
560

    
561
  try:
562
    pid = int(raw_data)
563
  except (TypeError, ValueError), err:
564
    logging.info("Can't parse pid file contents", exc_info=True)
565
    return 0
566

    
567
  return pid
568

    
569

    
570
def MatchNameComponent(key, name_list, case_sensitive=True):
571
  """Try to match a name against a list.
572

573
  This function will try to match a name like test1 against a list
574
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
575
  this list, I{'test1'} as well as I{'test1.example'} will match, but
576
  not I{'test1.ex'}. A multiple match will be considered as no match
577
  at all (e.g. I{'test1'} against C{['test1.example.com',
578
  'test1.example.org']}), except when the key fully matches an entry
579
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
580

581
  @type key: str
582
  @param key: the name to be searched
583
  @type name_list: list
584
  @param name_list: the list of strings against which to search the key
585
  @type case_sensitive: boolean
586
  @param case_sensitive: whether to provide a case-sensitive match
587

588
  @rtype: None or str
589
  @return: None if there is no match I{or} if there are multiple matches,
590
      otherwise the element from the list which matches
591

592
  """
593
  if key in name_list:
594
    return key
595

    
596
  re_flags = 0
597
  if not case_sensitive:
598
    re_flags |= re.IGNORECASE
599
    key = key.upper()
600
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
601
  names_filtered = []
602
  string_matches = []
603
  for name in name_list:
604
    if mo.match(name) is not None:
605
      names_filtered.append(name)
606
      if not case_sensitive and key == name.upper():
607
        string_matches.append(name)
608

    
609
  if len(string_matches) == 1:
610
    return string_matches[0]
611
  if len(names_filtered) == 1:
612
    return names_filtered[0]
613
  return None
614

    
615

    
616
class HostInfo:
617
  """Class implementing resolver and hostname functionality
618

619
  """
620
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")
621

    
622
  def __init__(self, name=None):
623
    """Initialize the host name object.
624

625
    If the name argument is not passed, it will use this system's
626
    name.
627

628
    """
629
    if name is None:
630
      name = self.SysName()
631

    
632
    self.query = name
633
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
634
    self.ip = self.ipaddrs[0]
635

    
636
  def ShortName(self):
637
    """Returns the hostname without domain.
638

639
    """
640
    return self.name.split('.')[0]
641

    
642
  @staticmethod
643
  def SysName():
644
    """Return the current system's name.
645

646
    This is simply a wrapper over C{socket.gethostname()}.
647

648
    """
649
    return socket.gethostname()
650

    
651
  @staticmethod
652
  def LookupHostname(hostname):
653
    """Look up hostname
654

655
    @type hostname: str
656
    @param hostname: hostname to look up
657

658
    @rtype: tuple
659
    @return: a tuple (name, aliases, ipaddrs) as returned by
660
        C{socket.gethostbyname_ex}
661
    @raise errors.ResolverError: in case of errors in resolving
662

663
    """
664
    try:
665
      result = socket.gethostbyname_ex(hostname)
666
    except socket.gaierror, err:
667
      # hostname not found in DNS
668
      raise errors.ResolverError(hostname, err.args[0], err.args[1])
669

    
670
    return result
671

    
672
  @classmethod
673
  def NormalizeName(cls, hostname):
674
    """Validate and normalize the given hostname.
675

676
    @attention: the validation is a bit more relaxed than the standards
677
        require; most importantly, we allow underscores in names
678
    @raise errors.OpPrereqError: when the name is not valid
679

680
    """
681
    hostname = hostname.lower()
682
    if (not cls._VALID_NAME_RE.match(hostname) or
683
        # double-dots, meaning empty label
684
        ".." in hostname or
685
        # empty initial label
686
        hostname.startswith(".")):
687
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
688
                                 errors.ECODE_INVAL)
689
    if hostname.endswith("."):
690
      hostname = hostname.rstrip(".")
691
    return hostname
692

    
693

    
694
def GetHostInfo(name=None):
695
  """Lookup host name and raise an OpPrereqError for failures"""
696

    
697
  try:
698
    return HostInfo(name)
699
  except errors.ResolverError, err:
700
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
701
                               (err[0], err[2]), errors.ECODE_RESOLVER)
702

    
703

    
704
def ListVolumeGroups():
705
  """List volume groups and their size
706

707
  @rtype: dict
708
  @return:
709
       Dictionary with volume group names as keys and
710
       their sizes as values
711

712
  """
713
  command = "vgs --noheadings --units m --nosuffix -o name,size"
714
  result = RunCmd(command)
715
  retval = {}
716
  if result.failed:
717
    return retval
718

    
719
  for line in result.stdout.splitlines():
720
    try:
721
      name, size = line.split()
722
      size = int(float(size))
723
    except (IndexError, ValueError), err:
724
      logging.error("Invalid output from vgs (%s): %s", err, line)
725
      continue
726

    
727
    retval[name] = size
728

    
729
  return retval
730

    
731

    
732
def BridgeExists(bridge):
733
  """Check whether the given bridge exists in the system
734

735
  @type bridge: str
736
  @param bridge: the bridge name to check
737
  @rtype: boolean
738
  @return: True if it does
739

740
  """
741
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
742

    
743

    
744
def NiceSort(name_list):
745
  """Sort a list of strings based on digit and non-digit groupings.
746

747
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
748
  will sort the list in the logical order C{['a1', 'a2', 'a10',
749
  'a11']}.
750

751
  The sort algorithm breaks each name in groups of either only-digits
752
  or no-digits. Only the first eight such groups are considered, and
753
  after that we just use what's left of the string.
754

755
  @type name_list: list
756
  @param name_list: the names to be sorted
757
  @rtype: list
758
  @return: a copy of the name list sorted with our algorithm
759

760
  """
761
  _SORTER_BASE = "(\D+|\d+)"
762
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
763
                                                  _SORTER_BASE, _SORTER_BASE,
764
                                                  _SORTER_BASE, _SORTER_BASE,
765
                                                  _SORTER_BASE, _SORTER_BASE)
766
  _SORTER_RE = re.compile(_SORTER_FULL)
767
  _SORTER_NODIGIT = re.compile("^\D*$")
768
  def _TryInt(val):
769
    """Attempts to convert a variable to integer."""
770
    if val is None or _SORTER_NODIGIT.match(val):
771
      return val
772
    rval = int(val)
773
    return rval
774

    
775
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
776
             for name in name_list]
777
  to_sort.sort()
778
  return [tup[1] for tup in to_sort]
779

    
780

    
781
def TryConvert(fn, val):
782
  """Try to convert a value ignoring errors.
783

784
  This function tries to apply function I{fn} to I{val}. If no
785
  C{ValueError} or C{TypeError} exceptions are raised, it will return
786
  the result, else it will return the original value. Any other
787
  exceptions are propagated to the caller.
788

789
  @type fn: callable
790
  @param fn: function to apply to the value
791
  @param val: the value to be converted
792
  @return: The converted value if the conversion was successful,
793
      otherwise the original value.
794

795
  """
796
  try:
797
    nv = fn(val)
798
  except (ValueError, TypeError):
799
    nv = val
800
  return nv
801

    
802

    
803
def IsValidIP(ip):
804
  """Verifies the syntax of an IPv4 address.
805

806
  This function checks if the IPv4 address passed is valid or not based
807
  on syntax (not IP range, class calculations, etc.).
808

809
  @type ip: str
810
  @param ip: the address to be checked
811
  @rtype: a regular expression match object
812
  @return: a regular expression match object, or None if the
813
      address is not valid
814

815
  """
816
  unit = "(0|[1-9]\d{0,2})"
817
  #TODO: convert and return only boolean
818
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
819

    
820

    
821
def IsValidShellParam(word):
822
  """Verifies is the given word is safe from the shell's p.o.v.
823

824
  This means that we can pass this to a command via the shell and be
825
  sure that it doesn't alter the command line and is passed as such to
826
  the actual command.
827

828
  Note that we are overly restrictive here, in order to be on the safe
829
  side.
830

831
  @type word: str
832
  @param word: the word to check
833
  @rtype: boolean
834
  @return: True if the word is 'safe'
835

836
  """
837
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
838

    
839

    
840
def BuildShellCmd(template, *args):
841
  """Build a safe shell command line from the given arguments.
842

843
  This function will check all arguments in the args list so that they
844
  are valid shell parameters (i.e. they don't contain shell
845
  metacharacters). If everything is ok, it will return the result of
846
  template % args.
847

848
  @type template: str
849
  @param template: the string holding the template for the
850
      string formatting
851
  @rtype: str
852
  @return: the expanded command line
853

854
  """
855
  for word in args:
856
    if not IsValidShellParam(word):
857
      raise errors.ProgrammerError("Shell argument '%s' contains"
858
                                   " invalid characters" % word)
859
  return template % args
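# Editor's note: illustrative sketch, not part of the original module;
# vg_name and lv_name stand for caller-provided values. BuildShellCmd
# rejects arguments containing shell metacharacters, so the resulting
# string can be handed to RunCmd as a shell command:
#
#   cmd = BuildShellCmd("lvremove -f %s/%s", vg_name, lv_name)
#   result = RunCmd(cmd)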
860

    
861

    
862
def FormatUnit(value, units):
863
  """Formats an incoming number of MiB with the appropriate unit.
864

865
  @type value: int
866
  @param value: integer representing the value in MiB (1048576)
867
  @type units: char
868
  @param units: the type of formatting we should do:
869
      - 'h' for automatic scaling
870
      - 'm' for MiBs
871
      - 'g' for GiBs
872
      - 't' for TiBs
873
  @rtype: str
874
  @return: the formatted value (with suffix)
875

876
  """
877
  if units not in ('m', 'g', 't', 'h'):
878
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
879

    
880
  suffix = ''
881

    
882
  if units == 'm' or (units == 'h' and value < 1024):
883
    if units == 'h':
884
      suffix = 'M'
885
    return "%d%s" % (round(value, 0), suffix)
886

    
887
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
888
    if units == 'h':
889
      suffix = 'G'
890
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
891

    
892
  else:
893
    if units == 'h':
894
      suffix = 'T'
895
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
896

    
897

    
898
def ParseUnit(input_string):
899
  """Tries to extract number and scale from the given string.
900

901
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
902
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
903
  is always an int in MiB.
904

905
  """
906
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
907
  if not m:
908
    raise errors.UnitParseError("Invalid format")
909

    
910
  value = float(m.groups()[0])
911

    
912
  unit = m.groups()[1]
913
  if unit:
914
    lcunit = unit.lower()
915
  else:
916
    lcunit = 'm'
917

    
918
  if lcunit in ('m', 'mb', 'mib'):
919
    # Value already in MiB
920
    pass
921

    
922
  elif lcunit in ('g', 'gb', 'gib'):
923
    value *= 1024
924

    
925
  elif lcunit in ('t', 'tb', 'tib'):
926
    value *= 1024 * 1024
927

    
928
  else:
929
    raise errors.UnitParseError("Unknown unit: %s" % unit)
930

    
931
  # Make sure we round up
932
  if int(value) < value:
933
    value += 1
934

    
935
  # Round up to the next multiple of 4
936
  value = int(value)
937
  if value % 4:
938
    value += 4 - value % 4
939

    
940
  return value
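# Editor's note: illustrative examples, not part of the original module.
# ParseUnit returns MiB rounded up to a multiple of 4, FormatUnit goes the
# other way:
#
#   ParseUnit("1.3G")      # -> 1332 (1.3 * 1024 = 1331.2, rounded up)
#   ParseUnit("513")       # -> 516 (no unit means MiB, rounded up to 4)
#   FormatUnit(2048, 'h')  # -> "2.0G"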
941

    
942

    
943
def AddAuthorizedKey(file_name, key):
944
  """Adds an SSH public key to an authorized_keys file.
945

946
  @type file_name: str
947
  @param file_name: path to authorized_keys file
948
  @type key: str
949
  @param key: string containing key
950

951
  """
952
  key_fields = key.split()
953

    
954
  f = open(file_name, 'a+')
955
  try:
956
    nl = True
957
    for line in f:
958
      # Ignore whitespace changes
959
      if line.split() == key_fields:
960
        break
961
      nl = line.endswith('\n')
962
    else:
963
      if not nl:
964
        f.write("\n")
965
      f.write(key.rstrip('\r\n'))
966
      f.write("\n")
967
      f.flush()
968
  finally:
969
    f.close()
970

    
971

    
972
def RemoveAuthorizedKey(file_name, key):
973
  """Removes an SSH public key from an authorized_keys file.
974

975
  @type file_name: str
976
  @param file_name: path to authorized_keys file
977
  @type key: str
978
  @param key: string containing key
979

980
  """
981
  key_fields = key.split()
982

    
983
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
984
  try:
985
    out = os.fdopen(fd, 'w')
986
    try:
987
      f = open(file_name, 'r')
988
      try:
989
        for line in f:
990
          # Ignore whitespace changes while comparing lines
991
          if line.split() != key_fields:
992
            out.write(line)
993

    
994
        out.flush()
995
        os.rename(tmpname, file_name)
996
      finally:
997
        f.close()
998
    finally:
999
      out.close()
1000
  except:
1001
    RemoveFile(tmpname)
1002
    raise
1003

    
1004

    
1005
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1006
  """Sets the name of an IP address and hostname in /etc/hosts.
1007

1008
  @type file_name: str
1009
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1010
  @type ip: str
1011
  @param ip: the IP address
1012
  @type hostname: str
1013
  @param hostname: the hostname to be added
1014
  @type aliases: list
1015
  @param aliases: the list of aliases to add for the hostname
1016

1017
  """
1018
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1019
  # Ensure aliases are unique
1020
  aliases = UniqueSequence([hostname] + aliases)[1:]
1021

    
1022
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1023
  try:
1024
    out = os.fdopen(fd, 'w')
1025
    try:
1026
      f = open(file_name, 'r')
1027
      try:
1028
        for line in f:
1029
          fields = line.split()
1030
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1031
            continue
1032
          out.write(line)
1033

    
1034
        out.write("%s\t%s" % (ip, hostname))
1035
        if aliases:
1036
          out.write(" %s" % ' '.join(aliases))
1037
        out.write('\n')
1038

    
1039
        out.flush()
1040
        os.fsync(out)
1041
        os.chmod(tmpname, 0644)
1042
        os.rename(tmpname, file_name)
1043
      finally:
1044
        f.close()
1045
    finally:
1046
      out.close()
1047
  except:
1048
    RemoveFile(tmpname)
1049
    raise
1050

    
1051

    
1052
def AddHostToEtcHosts(hostname):
1053
  """Wrapper around SetEtcHostsEntry.
1054

1055
  @type hostname: str
1056
  @param hostname: a hostname that will be resolved and added to
1057
      L{constants.ETC_HOSTS}
1058

1059
  """
1060
  hi = HostInfo(name=hostname)
1061
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1062

    
1063

    
1064
def RemoveEtcHostsEntry(file_name, hostname):
1065
  """Removes a hostname from /etc/hosts.
1066

1067
  IP addresses without names are removed from the file.
1068

1069
  @type file_name: str
1070
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1071
  @type hostname: str
1072
  @param hostname: the hostname to be removed
1073

1074
  """
1075
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1076
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1077
  try:
1078
    out = os.fdopen(fd, 'w')
1079
    try:
1080
      f = open(file_name, 'r')
1081
      try:
1082
        for line in f:
1083
          fields = line.split()
1084
          if len(fields) > 1 and not fields[0].startswith('#'):
1085
            names = fields[1:]
1086
            if hostname in names:
1087
              while hostname in names:
1088
                names.remove(hostname)
1089
              if names:
1090
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1091
              continue
1092

    
1093
          out.write(line)
1094

    
1095
        out.flush()
1096
        os.fsync(out)
1097
        os.chmod(tmpname, 0644)
1098
        os.rename(tmpname, file_name)
1099
      finally:
1100
        f.close()
1101
    finally:
1102
      out.close()
1103
  except:
1104
    RemoveFile(tmpname)
1105
    raise
1106

    
1107

    
1108
def RemoveHostFromEtcHosts(hostname):
1109
  """Wrapper around RemoveEtcHostsEntry.
1110

1111
  @type hostname: str
1112
  @param hostname: hostname that will be resolved and its
1113
      full and short name will be removed from
1114
      L{constants.ETC_HOSTS}
1115

1116
  """
1117
  hi = HostInfo(name=hostname)
1118
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
1119
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
1120

    
1121

    
1122
def TimestampForFilename():
1123
  """Returns the current time formatted for filenames.
1124

1125
  The format doesn't contain colons as some shells and applications use them as
1126
  separators.
1127

1128
  """
1129
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1130

    
1131

    
1132
def CreateBackup(file_name):
1133
  """Creates a backup of a file.
1134

1135
  @type file_name: str
1136
  @param file_name: file to be backed up
1137
  @rtype: str
1138
  @return: the path to the newly created backup
1139
  @raise errors.ProgrammerError: for invalid file names
1140

1141
  """
1142
  if not os.path.isfile(file_name):
1143
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1144
                                file_name)
1145

    
1146
  prefix = ("%s.backup-%s." %
1147
            (os.path.basename(file_name), TimestampForFilename()))
1148
  dir_name = os.path.dirname(file_name)
1149

    
1150
  fsrc = open(file_name, 'rb')
1151
  try:
1152
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1153
    fdst = os.fdopen(fd, 'wb')
1154
    try:
1155
      logging.debug("Backing up %s at %s", file_name, backup_name)
1156
      shutil.copyfileobj(fsrc, fdst)
1157
    finally:
1158
      fdst.close()
1159
  finally:
1160
    fsrc.close()
1161

    
1162
  return backup_name
1163

    
1164

    
1165
def ShellQuote(value):
1166
  """Quotes shell argument according to POSIX.
1167

1168
  @type value: str
1169
  @param value: the argument to be quoted
1170
  @rtype: str
1171
  @return: the quoted value
1172

1173
  """
1174
  if _re_shell_unquoted.match(value):
1175
    return value
1176
  else:
1177
    return "'%s'" % value.replace("'", "'\\''")
1178

    
1179

    
1180
def ShellQuoteArgs(args):
1181
  """Quotes a list of shell arguments.
1182

1183
  @type args: list
1184
  @param args: list of arguments to be quoted
1185
  @rtype: str
1186
  @return: the quoted arguments concatenated with spaces
1187

1188
  """
1189
  return ' '.join([ShellQuote(i) for i in args])
1190

    
1191

    
1192
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
1193
  """Simple ping implementation using TCP connect(2).
1194

1195
  Check if the given IP is reachable by attempting a TCP connect
1196
  to it.
1197

1198
  @type target: str
1199
  @param target: the IP or hostname to ping
1200
  @type port: int
1201
  @param port: the port to connect to
1202
  @type timeout: int
1203
  @param timeout: the timeout on the connection attempt
1204
  @type live_port_needed: boolean
1205
  @param live_port_needed: whether a closed port will cause the
1206
      function to return failure, as if there was a timeout
1207
  @type source: str or None
1208
  @param source: if specified, will cause the connect to be made
1209
      from this specific source address; failures to bind other
1210
      than C{EADDRNOTAVAIL} will be ignored
1211

1212
  """
1213
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1214

    
1215
  success = False
1216

    
1217
  if source is not None:
1218
    try:
1219
      sock.bind((source, 0))
1220
    except socket.error, (errcode, _):
1221
      if errcode == errno.EADDRNOTAVAIL:
1222
        success = False
1223

    
1224
  sock.settimeout(timeout)
1225

    
1226
  try:
1227
    sock.connect((target, port))
1228
    sock.close()
1229
    success = True
1230
  except socket.timeout:
1231
    success = False
1232
  except socket.error, (errcode, _):
1233
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)
1234

    
1235
  return success
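# Editor's note: illustrative sketch, not part of the original module; the
# address and port below are examples only:
#
#   if TcpPing("192.0.2.10", 1811, timeout=5, live_port_needed=True):
#     logging.info("remote daemon is listening")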
1236

    
1237

    
1238
def OwnIpAddress(address):
1239
  """Check if the current host has the the given IP address.
1240

1241
  Currently this is done by TCP-pinging the address from the loopback
1242
  address.
1243

1244
  @type address: string
1245
  @param address: the address to check
1246
  @rtype: bool
1247
  @return: True if we own the address
1248

1249
  """
1250
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
1251
                 source=constants.LOCALHOST_IP_ADDRESS)
1252

    
1253

    
1254
def ListVisibleFiles(path):
1255
  """Returns a list of visible files in a directory.
1256

1257
  @type path: str
1258
  @param path: the directory to enumerate
1259
  @rtype: list
1260
  @return: the list of all files not starting with a dot
1261
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1262

1263
  """
1264
  if not IsNormAbsPath(path):
1265
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1266
                                 " absolute/normalized: '%s'" % path)
1267
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1268
  files.sort()
1269
  return files
1270

    
1271

    
1272
def GetHomeDir(user, default=None):
1273
  """Try to get the homedir of the given user.
1274

1275
  The user can be passed either as a string (denoting the name) or as
1276
  an integer (denoting the user id). If the user is not found, the
1277
  'default' argument is returned, which defaults to None.
1278

1279
  """
1280
  try:
1281
    if isinstance(user, basestring):
1282
      result = pwd.getpwnam(user)
1283
    elif isinstance(user, (int, long)):
1284
      result = pwd.getpwuid(user)
1285
    else:
1286
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1287
                                   type(user))
1288
  except KeyError:
1289
    return default
1290
  return result.pw_dir
1291

    
1292

    
1293
def NewUUID():
1294
  """Returns a random UUID.
1295

1296
  @note: This is a Linux-specific method as it uses the /proc
1297
      filesystem.
1298
  @rtype: str
1299

1300
  """
1301
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1302

    
1303

    
1304
def GenerateSecret(numbytes=20):
1305
  """Generates a random secret.
1306

1307
  This will generate a pseudo-random secret returning an hex string
1308
  (so that it can be used where an ASCII string is needed).
1309

1310
  @param numbytes: the number of bytes which will be represented by the returned
1311
      string (defaulting to 20, the length of a SHA1 hash)
1312
  @rtype: str
1313
  @return: an hex representation of the pseudo-random sequence
1314

1315
  """
1316
  return os.urandom(numbytes).encode('hex')
1317

    
1318

    
1319
def EnsureDirs(dirs):
1320
  """Make required directories, if they don't exist.
1321

1322
  @param dirs: list of tuples (dir_name, dir_mode)
1323
  @type dirs: list of (string, integer)
1324

1325
  """
1326
  for dir_name, dir_mode in dirs:
1327
    try:
1328
      os.mkdir(dir_name, dir_mode)
1329
    except EnvironmentError, err:
1330
      if err.errno != errno.EEXIST:
1331
        raise errors.GenericError("Cannot create needed directory"
1332
                                  " '%s': %s" % (dir_name, err))
1333
    if not os.path.isdir(dir_name):
1334
      raise errors.GenericError("%s is not a directory" % dir_name)
1335

    
1336

    
1337
def ReadFile(file_name, size=-1):
1338
  """Reads a file.
1339

1340
  @type size: int
1341
  @param size: Read at most size bytes (if negative, entire file)
1342
  @rtype: str
1343
  @return: the (possibly partial) content of the file
1344

1345
  """
1346
  f = open(file_name, "r")
1347
  try:
1348
    return f.read(size)
1349
  finally:
1350
    f.close()
1351

    
1352

    
1353
def WriteFile(file_name, fn=None, data=None,
1354
              mode=None, uid=-1, gid=-1,
1355
              atime=None, mtime=None, close=True,
1356
              dry_run=False, backup=False,
1357
              prewrite=None, postwrite=None):
1358
  """(Over)write a file atomically.
1359

1360
  The file_name and either fn (a function taking one argument, the
1361
  file descriptor, and which should write the data to it) or data (the
1362
  contents of the file) must be passed. The other arguments are
1363
  optional and allow setting the file mode, owner and group, and the
1364
  mtime/atime of the file.
1365

1366
  If the function doesn't raise an exception, it has succeeded and the
1367
  target file has the new contents. If the function has raised an
1368
  exception, an existing target file should be unmodified and the
1369
  temporary file should be removed.
1370

1371
  @type file_name: str
1372
  @param file_name: the target filename
1373
  @type fn: callable
1374
  @param fn: content writing function, called with
1375
      file descriptor as parameter
1376
  @type data: str
1377
  @param data: contents of the file
1378
  @type mode: int
1379
  @param mode: file mode
1380
  @type uid: int
1381
  @param uid: the owner of the file
1382
  @type gid: int
1383
  @param gid: the group of the file
1384
  @type atime: int
1385
  @param atime: a custom access time to be set on the file
1386
  @type mtime: int
1387
  @param mtime: a custom modification time to be set on the file
1388
  @type close: boolean
1389
  @param close: whether to close file after writing it
1390
  @type prewrite: callable
1391
  @param prewrite: function to be called before writing content
1392
  @type postwrite: callable
1393
  @param postwrite: function to be called after writing content
1394

1395
  @rtype: None or int
1396
  @return: None if the 'close' parameter evaluates to True,
1397
      otherwise the file descriptor
1398

1399
  @raise errors.ProgrammerError: if any of the arguments are not valid
1400

1401
  """
1402
  if not os.path.isabs(file_name):
1403
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1404
                                 " absolute: '%s'" % file_name)
1405

    
1406
  if [fn, data].count(None) != 1:
1407
    raise errors.ProgrammerError("fn or data required")
1408

    
1409
  if [atime, mtime].count(None) == 1:
1410
    raise errors.ProgrammerError("Both atime and mtime must be either"
1411
                                 " set or None")
1412

    
1413
  if backup and not dry_run and os.path.isfile(file_name):
1414
    CreateBackup(file_name)
1415

    
1416
  dir_name, base_name = os.path.split(file_name)
1417
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1418
  do_remove = True
1419
  # here we need to make sure we remove the temp file, if any error
1420
  # leaves it in place
1421
  try:
1422
    if uid != -1 or gid != -1:
1423
      os.chown(new_name, uid, gid)
1424
    if mode:
1425
      os.chmod(new_name, mode)
1426
    if callable(prewrite):
1427
      prewrite(fd)
1428
    if data is not None:
1429
      os.write(fd, data)
1430
    else:
1431
      fn(fd)
1432
    if callable(postwrite):
1433
      postwrite(fd)
1434
    os.fsync(fd)
1435
    if atime is not None and mtime is not None:
1436
      os.utime(new_name, (atime, mtime))
1437
    if not dry_run:
1438
      os.rename(new_name, file_name)
1439
      do_remove = False
1440
  finally:
1441
    if close:
1442
      os.close(fd)
1443
      result = None
1444
    else:
1445
      result = fd
1446
    if do_remove:
1447
      RemoveFile(new_name)
1448

    
1449
  return result
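# Editor's note: illustrative sketch, not part of the original module; the
# target path is an example only. Exactly one of 'data' and 'fn' must be
# passed:
#
#   WriteFile("/var/lib/ganeti/example.txt", data="hello\n", mode=0644)
#   WriteFile("/var/lib/ganeti/example.txt",
#             fn=lambda fd: os.write(fd, "hello\n"))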
1450

    
1451

    
1452
def FirstFree(seq, base=0):
1453
  """Returns the first non-existing integer from seq.
1454

1455
  The seq argument should be a sorted list of positive integers. The
1456
  first time the index of an element is smaller than the element
1457
  value, the index will be returned.
1458

1459
  The base argument is used to start at a different offset,
1460
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1461

1462
  Example: C{[0, 1, 3]} will return I{2}.
1463

1464
  @type seq: sequence
1465
  @param seq: the sequence to be analyzed.
1466
  @type base: int
1467
  @param base: use this value as the base index of the sequence
1468
  @rtype: int
1469
  @return: the first non-used index in the sequence
1470

1471
  """
1472
  for idx, elem in enumerate(seq):
1473
    assert elem >= base, "Passed element is higher than base offset"
1474
    if elem > idx + base:
1475
      # idx is not used
1476
      return idx + base
1477
  return None
1478

    
1479

    
1480
try:
1481
  all = all # pylint: disable-msg=W0622
1482
except NameError:
1483
  def all(seq, pred=bool): # pylint: disable-msg=W0622
1484
    "Returns True if pred(x) is True for every element in the iterable"
1485
    for _ in itertools.ifilterfalse(pred, seq):
1486
      return False
1487
    return True
1488

    
1489

    
1490
try:
1491
  any = any # pylint: disable-msg=W0622
1492
except NameError:
1493
  def any(seq, pred=bool): # pylint: disable-msg=W0622
1494
    "Returns True if pred(x) is True for at least one element in the iterable"
1495
    for _ in itertools.ifilter(pred, seq):
1496
      return True
1497
    return False
1498

    
1499

    
1500
def partition(seq, pred=bool): # # pylint: disable-msg=W0622
1501
  "Partition a list in two, based on the given predicate"
1502
  return (list(itertools.ifilter(pred, seq)),
1503
          list(itertools.ifilterfalse(pred, seq)))
1504

    
1505

    
1506
def UniqueSequence(seq):
1507
  """Returns a list with unique elements.
1508

1509
  Element order is preserved.
1510

1511
  @type seq: sequence
1512
  @param seq: the sequence with the source elements
1513
  @rtype: list
1514
  @return: list of unique elements from seq
1515

1516
  """
1517
  seen = set()
1518
  return [i for i in seq if i not in seen and not seen.add(i)]
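# Editor's note: illustrative example, not part of the original module:
#
#   UniqueSequence([1, 2, 1, 3, 2])  # -> [1, 2, 3]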
1519

    
1520

    
1521
def NormalizeAndValidateMac(mac):
1522
  """Normalizes and check if a MAC address is valid.
1523

1524
  Checks whether the supplied MAC address is formally correct, only
1525
  accepts colon separated format. Normalize it to all lower.
1526

1527
  @type mac: str
1528
  @param mac: the MAC to be validated
1529
  @rtype: str
1530
  @return: returns the normalized and validated MAC.
1531

1532
  @raise errors.OpPrereqError: If the MAC isn't valid
1533

1534
  """
1535
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
1536
  if not mac_check.match(mac):
1537
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
1538
                               mac, errors.ECODE_INVAL)
1539

    
1540
  return mac.lower()
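# Editor's note: illustrative examples, not part of the original module:
#
#   NormalizeAndValidateMac("AA:00:11:22:33:44")  # -> "aa:00:11:22:33:44"
#   NormalizeAndValidateMac("aa-00-11-22-33-44")  # raises errors.OpPrereqError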
1541

    
1542

    
1543
def TestDelay(duration):
1544
  """Sleep for a fixed amount of time.
1545

1546
  @type duration: float
1547
  @param duration: the sleep duration
1548
  @rtype: tuple
1549
  @return: (False, message) for negative values, (True, None) otherwise
1550

1551
  """
1552
  if duration < 0:
1553
    return False, "Invalid sleep duration"
1554
  time.sleep(duration)
1555
  return True, None
1556

    
1557

    
1558
def _CloseFDNoErr(fd, retries=5):
1559
  """Close a file descriptor ignoring errors.
1560

1561
  @type fd: int
1562
  @param fd: the file descriptor
1563
  @type retries: int
1564
  @param retries: how many retries to make, in case we get any
1565
      other error than EBADF
1566

1567
  """
1568
  try:
1569
    os.close(fd)
1570
  except OSError, err:
1571
    if err.errno != errno.EBADF:
1572
      if retries > 0:
1573
        _CloseFDNoErr(fd, retries - 1)
1574
    # else either it's closed already or we're out of retries, so we
1575
    # ignore this and go on
1576

    
1577

    
1578
def CloseFDs(noclose_fds=None):
1579
  """Close file descriptors.
1580

1581
  This closes all file descriptors above 2 (i.e. except
1582
  stdin/out/err).
1583

1584
  @type noclose_fds: list or None
1585
  @param noclose_fds: if given, it denotes a list of file descriptor
1586
      that should not be closed
1587

1588
  """
1589
  # Default maximum for the number of available file descriptors.
1590
  if 'SC_OPEN_MAX' in os.sysconf_names:
1591
    try:
1592
      MAXFD = os.sysconf('SC_OPEN_MAX')
1593
      if MAXFD < 0:
1594
        MAXFD = 1024
1595
    except OSError:
1596
      MAXFD = 1024
1597
  else:
1598
    MAXFD = 1024
1599
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
1600
  if (maxfd == resource.RLIM_INFINITY):
1601
    maxfd = MAXFD
1602

    
1603
  # Iterate through and close all file descriptors (except the standard ones)
1604
  for fd in range(3, maxfd):
1605
    if noclose_fds and fd in noclose_fds:
1606
      continue
1607
    _CloseFDNoErr(fd)
1608

    
1609

    
1610
def Daemonize(logfile):
1611
  """Daemonize the current process.
1612

1613
  This detaches the current process from the controlling terminal and
1614
  runs it in the background as a daemon.
1615

1616
  @type logfile: str
1617
  @param logfile: the logfile to which we should redirect stdout/stderr
1618
  @rtype: int
1619
  @return: the value zero
1620

1621
  """
1622
  # pylint: disable-msg=W0212
1623
  # yes, we really want os._exit
1624
  UMASK = 077
1625
  WORKDIR = "/"
1626

    
1627
  # this might fail
1628
  pid = os.fork()
1629
  if (pid == 0):  # The first child.
1630
    os.setsid()
1631
    # this might fail
1632
    pid = os.fork() # Fork a second child.
1633
    if (pid == 0):  # The second child.
1634
      os.chdir(WORKDIR)
1635
      os.umask(UMASK)
1636
    else:
1637
      # exit() or _exit()?  See below.
1638
      os._exit(0) # Exit parent (the first child) of the second child.
1639
  else:
1640
    os._exit(0) # Exit parent of the first child.
1641

    
1642
  for fd in range(3):
1643
    _CloseFDNoErr(fd)
1644
  i = os.open("/dev/null", os.O_RDONLY) # stdin
1645
  assert i == 0, "Can't close/reopen stdin"
1646
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
1647
  assert i == 1, "Can't close/reopen stdout"
1648
  # Duplicate standard output to standard error.
1649
  os.dup2(1, 2)
1650
  return 0
1651

    
1652

    
1653
def DaemonPidFileName(name):
1654
  """Compute a ganeti pid file absolute path
1655

1656
  @type name: str
1657
  @param name: the daemon name
1658
  @rtype: str
1659
  @return: the full path to the pidfile corresponding to the given
1660
      daemon name
1661

1662
  """
1663
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
1664

    
1665

    
1666
def EnsureDaemon(name):
1667
  """Check for and start daemon if not alive.
1668

1669
  """
1670
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
1671
  if result.failed:
1672
    logging.error("Can't start daemon '%s', failure %s, output: %s",
1673
                  name, result.fail_reason, result.output)
1674
    return False
1675

    
1676
  return True
1677

    
1678

    
1679
def WritePidFile(name):
1680
  """Write the current process pidfile.
1681

1682
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
1683

1684
  @type name: str
1685
  @param name: the daemon name to use
1686
  @raise errors.GenericError: if the pid file already exists and
1687
      points to a live process
1688

1689
  """
1690
  pid = os.getpid()
1691
  pidfilename = DaemonPidFileName(name)
1692
  if IsProcessAlive(ReadPidFile(pidfilename)):
1693
    raise errors.GenericError("%s contains a live process" % pidfilename)
1694

    
1695
  WriteFile(pidfilename, data="%d\n" % pid)
1696

    
1697

    
1698
def RemovePidFile(name):
1699
  """Remove the current process pidfile.
1700

1701
  Any errors are ignored.
1702

1703
  @type name: str
1704
  @param name: the daemon name used to derive the pidfile name
1705

1706
  """
1707
  pidfilename = DaemonPidFileName(name)
1708
  # TODO: we could check here that the file contains our pid
1709
  try:
1710
    RemoveFile(pidfilename)
1711
  except: # pylint: disable-msg=W0702
1712
    pass


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
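
# Illustrative usage sketch (the PID value is hypothetical): send SIGTERM and
# escalate to SIGKILL if the process is still alive after 10 seconds.
#
#   KillProcess(12345, signal_=signal.SIGTERM, timeout=10)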


def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is a generic helper to search for filesystem objects (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: list of strings
  @param search_path: the directories in which to search
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
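
# Illustrative example (the volume group data is made up): with
# vglist = {"xenvg": 20480}, CheckVolumeGroupSize(vglist, "xenvg", 10240)
# returns None, while a minsize of 40960 returns the error string
# "volume group 'xenvg' too small (40960 MiB required, 20480 MiB found)".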


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)
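
# Illustrative example: SplitTime and MergeTime are (modulo floating point
# rounding) inverses of each other, e.g.
#   SplitTime(1234.5)         -> (1234, 500000)
#   MergeTime((1234, 500000)) -> 1234.5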


def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port
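
# Illustrative example (the daemon name is hypothetical): for a daemon listed
# in constants.DAEMONS_PORTS, a services database entry such as
#   ganeti-example  1234/tcp
# overrides the compiled-in default; without such an entry the default port
# from constants.DAEMONS_PORTS is returned.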


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously set up handlers (iterate over a copy, since
  # removeHandler modifies the list)
  for handler in root_logger.handlers[:]:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not set up or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
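
# Illustrative call (the path and program name are hypothetical):
#   SetupLogging("/var/log/ganeti/example.log", debug=1, stderr_logging=True,
#                program="example-daemon")
# logs DEBUG and above to the file (unless constants.SYSLOG_USAGE is 'only'),
# everything to stderr, and INFO and above to syslog when syslog is enabled.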


def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This prevents paths like /dir/../../other/path from being considered valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
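
# Illustrative examples:
#   PathJoin("/var/run/ganeti", "example.pid") -> "/var/run/ganeti/example.pid"
#   PathJoin("/var/run", "../etc")             -> raises ValueError (the result
#                                                 is not a normalized path)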


def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
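
# Illustrative examples (results shown as Python string literals):
#   SafeEncode("foo\nbar")  -> "foo\\nbar"
#   SafeEncode("bel\x07l")  -> "bel\\x07l"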


def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to appear inside an
  element. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
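
# Illustrative examples (arguments shown as Python string literals):
#   UnescapeAndSplit("a,b\\,c,d")    -> ["a", "b,c", "d"]
#   UnescapeAndSplit("a,b\\\\,c,d")  -> ["a", "b\\", "c", "d"]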


def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
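
# Illustrative usage sketch: run a check that might misbehave (e.g. crash or
# leak resources) without affecting the current process; only a boolean
# result comes back.
#
#   def _CheckSomething(path):      # hypothetical helper
#     return os.path.exists(path)
#
#   ok = RunInSeparateProcess(_CheckSomething, "/some/path")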


def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper


def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on
  # all platforms
  return time.strftime("%F %T", time.localtime(val))


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value


class RetryTimeout(Exception):
  """Retry loop timed out.

  """


class RetryAgain(Exception):
  """Retry again.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run: grow unboundedly if there is no limit, otherwise
    # cap the delay at the limit
    if self._limit is None:
      self._next *= self._factor
    elif self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()


def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain:
      pass

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      raise RetryTimeout()

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
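
# Illustrative usage sketch (the polled helpers are hypothetical): poll
# _IsReady, starting with a 0.1s delay that grows by a factor of 1.5 up to a
# 2.0s cap, for at most 30 seconds overall.
#
#   def _WaitForReady():
#     if not _IsReady():
#       raise RetryAgain()
#
#   Retry(_WaitForReady, (0.1, 1.5, 2.0), 30.0)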


class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must not be negative"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)


def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
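
# Illustrative usage sketch (the decorated function is hypothetical): the
# target receives the installed handlers via the 'signal_handlers' keyword
# argument and can poll their 'called' attribute.
#
#   @SignalHandled([signal.SIGTERM])
#   def _Serve(signal_handlers=None):
#     while not signal_handlers[signal.SIGTERM].called:
#       pass                        # do one unit of work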


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers

    """
    self.signum = set(signum)
    self.called = False

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  # we don't care about arguments, but we leave them named for the future
  def _HandleSignal(self, signum, frame): # pylint: disable-msg=W0613
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
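
# Illustrative example (the field names are made up):
#   fields = FieldSet("name", "pinst_cnt", r"tag\(.*\)")
#   fields.Matches("pinst_cnt")           -> a match object (truthy)
#   fields.NonMatching(["name", "size"])  -> ["size"]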