MB = 2 ** 20
size = (dev.meta['SIZE'] + MB - 1) // MB
- shrinked = 'shrinked' in session and session['shrinked'] == True
+ shrinked = 'shrinked' in session and session['shrinked']
postfix = " (shrinked)" if shrinked else ''
title = "OS: %s, Distro: %s, Size: %dMB%s" % \
image_os = session['image_os']
# Is the image already shrinked?
- if 'shrinked' in session and session['shrinked'] == True:
+ if 'shrinked' in session and session['shrinked']:
msg = "It seems you have shrinked the image. Running system " \
"preparation tasks on a shrinked image is dangerous."
d = session['dialog']
dev = session['device']
- shrinked = 'shrinked' in session and session['shrinked'] == True
+ shrinked = 'shrinked' in session and session['shrinked']
if shrinked:
d.msgbox("You have already shrinked your image!")
snapshot = disk.snapshot()
dev = disk.get_device(snapshot)
-
metadata = {}
for (key, value) in dev.meta.items():
metadata[str(key)] = str(value)
sourcedev = self._losetup(self.source)
elif not stat.S_ISBLK(mode):
raise ValueError("Invalid media source. Only block devices, "
- "regular files and directories are supported.")
+ "regular files and directories are supported.")
else:
self.out.success('looks like a block device')
snapshot = uuid.uuid4().hex
tablefd, table = tempfile.mkstemp()
try:
- os.write(tablefd, "0 %d snapshot %s %s n 8" % \
- (int(size), sourcedev, cowdev))
+ os.write(tablefd, "0 %d snapshot %s %s n 8" %
+ (int(size), sourcedev, cowdev))
dmsetup('create', snapshot, table)
self._add_cleanup(dmsetup, 'remove', snapshot)
# Sometimes dmsetup remove fails with Device or resource busy,
# feature of libguestfs, it's better to disable it.
self.g.set_recovery_proc(0)
version = self.g.version()
- if version['major'] > 1 or (version['major'] == 1 and
- (version['minor'] >= 18 or \
- (version['minor'] == 17 and version['release'] >= 14))):
+ if version['major'] > 1 or \
+ (version['major'] == 1 and (version['minor'] >= 18 or
+ (version['minor'] == 17 and
+ version['release'] >= 14))):
self.g.set_recovery_proc(1)
self.out.output("Enabling recovery proc")
self.progressbar = self.out.Progress(100, "Launching helper VM",
"percent")
eh = self.g.set_event_callback(self.progress_callback,
- guestfs.EVENT_PROGRESS)
+ guestfs.EVENT_PROGRESS)
self.g.launch()
self.guestfs_enabled = True
self.g.delete_event_callback(eh)
raise FatalError("No operating system found")
if len(roots) > 1:
raise FatalError("Multiple operating systems found."
- "We only support images with one filesystem.")
+ "We only support images with one filesystem.")
self.root = roots[0]
self.guestfs_device = self.g.part_to_dev(self.root)
self.meta['SIZE'] = self.g.blockdev_getsize64(self.guestfs_device)
self.meta['PARTITION_TABLE'] = \
- self.g.part_get_parttype(self.guestfs_device)
+ self.g.part_get_parttype(self.guestfs_device)
self.ostype = self.g.inspect_get_type(self.root)
self.distro = self.g.inspect_get_distro(self.root)
def _last_partition(self):
if self.meta['PARTITION_TABLE'] not in 'msdos' 'gpt':
msg = "Unsupported partition table: %s. Only msdos and gpt " \
- "partition tables are supported" % self.meta['PARTITION_TABLE']
+ "partition tables are supported" % self.meta['PARTITION_TABLE']
raise FatalError(msg)
- is_extended = lambda p: self.g.part_get_mbr_id(
- self.guestfs_device, p['part_num']) == 5
- is_logical = lambda p: self.meta['PARTITION_TABLE'] != 'msdos' and \
- p['part_num'] > 4
+ is_extended = lambda p: \
+ self.g.part_get_mbr_id(self.guestfs_device, p['part_num']) == 5
+ is_logical = lambda p: \
+ self.meta['PARTITION_TABLE'] != 'msdos' and p['part_num'] > 4
partitions = self.g.part_list(self.guestfs_device)
last_partition = partitions[-1]
ATTENTION: make sure unmount is called before shrink
"""
- get_fstype = lambda p: self.g.vfs_type("%s%d" % \
- (self.guestfs_device, p['part_num']))
- is_logical = lambda p: self.meta['PARTITION_TABLE'] == 'msdos' and \
- p['part_num'] > 4
- is_extended = lambda p: self.meta['PARTITION_TABLE'] == 'msdos' and \
- self.g.part_get_mbr_id(self.guestfs_device, p['part_num']) == 5
+ get_fstype = lambda p: \
+ self.g.vfs_type("%s%d" % (self.guestfs_device, p['part_num']))
+ is_logical = lambda p: \
+ self.meta['PARTITION_TABLE'] == 'msdos' and p['part_num'] > 4
+ is_extended = lambda p: \
+ self.meta['PARTITION_TABLE'] == 'msdos' and \
+ self.g.part_get_mbr_id(self.guestfs_device, p['part_num']) == 5
part_add = lambda ptype, start, stop: \
- self.g.part_add(self.guestfs_device, ptype, start, stop)
+ self.g.part_add(self.guestfs_device, ptype, start, stop)
part_del = lambda p: self.g.part_del(self.guestfs_device, p)
part_get_id = lambda p: self.g.part_get_mbr_id(self.guestfs_device, p)
- part_set_id = lambda p, id: self.g.part_set_mbr_id(
- self.guestfs_device, p, id)
- part_get_bootable = lambda p: self.g.part_get_bootable(
- self.guestfs_device, p)
- part_set_bootable = lambda p, bootable: self.g.part_set_bootable(
- self.guestfs_device, p, bootable)
+ part_set_id = lambda p, id: \
+ self.g.part_set_mbr_id(self.guestfs_device, p, id)
+ part_get_bootable = lambda p: \
+ self.g.part_get_bootable(self.guestfs_device, p)
+ part_set_bootable = lambda p, bootable: \
+ self.g.part_set_bootable(self.guestfs_device, p, bootable)
MB = 2 ** 20
if fstype == 'swap':
self.meta['SWAP'] = "%d:%s" % \
- (last_part['part_num'],
- (last_part['part_size'] + MB - 1) // MB)
+ (last_part['part_num'],
+ (last_part['part_size'] + MB - 1) // MB)
part_del(last_part['part_num'])
continue
elif is_extended(last_part):
def pack(self):
return struct.pack(self.format,
- self.status,
- self.start,
- self.type,
- self.end,
- self.first_sector,
- self.sector_count
- )
+ self.status,
+ self.start,
+ self.type,
+ self.end,
+ self.first_sector,
+ self.sector_count)
@staticmethod
def size():
start = self.unpack_chs(self.start)
end = self.unpack_chs(self.end)
return "%d %s %d %s %d %d" % (self.status, start, self.type, end,
- self.first_sector, self.sector_count)
+ self.first_sector, self.sector_count)
def unpack_chs(self, chs):
"""Unpacks a CHS address string to a tuple."""
"""
def __init__(self, block):
raw_part = {}
- self.code_area, \
- raw_part[0], \
- raw_part[1], \
- raw_part[2], \
- raw_part[3], \
- self.signature = struct.unpack(self.format, block)
+ (self.code_area,
+ raw_part[0],
+ raw_part[1],
+ raw_part[2],
+ raw_part[3],
+ self.signature) = struct.unpack(self.format, block)
self.part = {}
for i in range(4):
def pack(self):
"""Packs an MBR to a binary string."""
return struct.pack(self.format,
- self.code_area,
- self.part[0].pack(),
- self.part[1].pack(),
- self.part[2].pack(),
- self.part[3].pack(),
- self.signature
- )
+ self.code_area,
+ self.part[0].pack(),
+ self.part[1].pack(),
+ self.part[2].pack(),
+ self.part[3].pack(),
+ self.signature)
def __str__(self):
ret = ""
for i in range(4):
ret += "Partition %d: %s\n" % (i, self.part[i])
- ret += "Signature: %s %s\n" % (
- hex(ord(self.signature[0])), hex(ord(self.signature[1])))
+ ret += "Signature: %s %s\n" % (hex(ord(self.signature[0])),
+ hex(ord(self.signature[1])))
return ret
"""
def __init__(self, block):
- self.signature, \
- self.revision, \
- self.hdr_size, \
- self.header_crc32, \
- self.current_lba, \
- self.backup_lba, \
- self.first_usable_lba, \
- self.last_usable_lba, \
- self.uuid, \
- self.part_entry_start, \
- self.part_count, \
- self.part_entry_size, \
- self.part_crc32 = struct.unpack(self.format, block)
+ (self.signature,
+ self.revision,
+ self.hdr_size,
+ self.header_crc32,
+ self.current_lba,
+ self.backup_lba,
+ self.first_usable_lba,
+ self.last_usable_lba,
+ self.uuid,
+ self.part_entry_start,
+ self.part_count,
+ self.part_entry_size,
+ self.part_crc32) = struct.unpack(self.format, block)
def pack(self):
"""Packs a GPT Header to a binary string."""
return struct.pack(self.format,
- self.signature, \
- self.revision, \
- self.hdr_size, \
- self.header_crc32, \
- self.current_lba, \
- self.backup_lba, \
- self.first_usable_lba, \
- self.last_usable_lba, \
- self.uuid, \
- self.part_entry_start, \
- self.part_count, \
- self.part_entry_size, \
- self.part_crc32
- )
+ self.signature,
+ self.revision,
+ self.hdr_size,
+ self.header_crc32,
+ self.current_lba,
+ self.backup_lba,
+ self.first_usable_lba,
+ self.last_usable_lba,
+ self.uuid,
+ self.part_entry_start,
+ self.part_count,
+ self.part_entry_size,
+ self.part_crc32)
@staticmethod
def size():
return struct.calcsize(GPTPartitionTable.GPTHeader.format)
def __str__(self):
- return \
- "Signature: %s\n" % self.signature + \
- "Revision: %r\n" % self.revision + \
- "Header Size: %d\n" % self.hdr_size + \
- "CRC32: %d\n" % self.header_crc32 + \
- "Current LBA: %d\n" % self.current_lba + \
- "Backup LBA: %d\n" % self.backup_lba + \
- "First Usable LBA: %d\n" % self.first_usable_lba + \
- "Last Usable LBA: %d\n" % self.last_usable_lba + \
- "Disk GUID: %s\n" % uuid.UUID(bytes=self.uuid) + \
- "Partition entries starting LBA: %d\n" % self.part_entry_start + \
- "Number of Partition entries: %d\n" % self.part_count + \
- "Size of a partition entry: %d\n" % self.part_entry_size + \
- "CRC32 of partition array: %s\n" % self.part_crc32
+ return "Signature: %s\n" % self.signature + \
+ "Revision: %r\n" % self.revision + \
+ "Header Size: %d\n" % self.hdr_size + \
+ "CRC32: %d\n" % self.header_crc32 + \
+ "Current LBA: %d\n" % self.current_lba + \
+ "Backup LBA: %d\n" % self.backup_lba + \
+ "First Usable LBA: %d\n" % self.first_usable_lba + \
+ "Last Usable LBA: %d\n" % self.last_usable_lba + \
+ "Disk GUID: %s\n" % uuid.UUID(bytes=self.uuid) + \
+ "Partition entries starting LBA: %d\n" % \
+ self.part_entry_start + \
+ "Number of Partition entries: %d\n" % self.part_count + \
+ "Size of a partition entry: %d\n" % self.part_entry_size + \
+ "CRC32 of partition array: %s\n" % self.part_crc32
def __init__(self, disk):
self.disk = disk
# Partition entries (LBA 2...34)
d.seek(self.primary.part_entry_start * BLOCKSIZE)
entries_size = self.primary.part_count * \
- self.primary.part_entry_size
+ self.primary.part_entry_size
self.part_entries = d.read(entries_size)
# Secondary GPT Header (LBA -1)
# new_size is at least: size + Partition Entries + Secondary GPT Header
new_size = aligned if aligned <= old_size else \
- size + len(self.part_entries) + BLOCKSIZE
+ size + len(self.part_entries) + BLOCKSIZE
assert new_size <= old_size, "The secondary GPT fits in the device"
self.primary.backup_lba = lba_count - 1 # LBA-1
self.primary.last_usable_lba = lba_count - 34 # LBA-34
self.primary.header_crc32 = \
- binascii.crc32(self.primary.pack()) & 0xffffffff
+ binascii.crc32(self.primary.pack()) & 0xffffffff
# Fix Secondary header
self.secondary.header_crc32 = 0
self.secondary.last_usable_lba = lba_count - 34 # LBA-34
self.secondary.part_entry_start = lba_count - 33 # LBA-33
self.secondary.header_crc32 = \
- binascii.crc32(self.secondary.pack()) & 0xffffffff
+ binascii.crc32(self.secondary.pack()) & 0xffffffff
# Copy the new partition table back to the device
with open(self.disk, "wb") as d:
import sys
import os
+
def get_help_file(name):
    """Return the path of the .rst help file for *name*.

    Help files are expected to live in the same directory as this module,
    named ``<name>.rst``.
    """
    dirname = os.path.dirname(sys.modules[__name__].__file__)
    # os.path.join handles the separator portably and avoids the bogus
    # leading-separator result the old "%s%s%s.rst" concatenation produced
    # when dirname was empty (module loaded via a bare relative path).
    return os.path.join(dirname, "%s.rst" % name)
pithos_url = config.get('storage', 'url')
self.container = CONTAINER
self.pithos_client = PithosClient(pithos_url, token, self.account,
- self.container)
+ self.container)
image_url = config.get('image', 'url')
self.image_client = ImageClient(image_url, token)
# Convert all metadata to strings
str_metadata = {}
for (key, value) in metadata.iteritems():
- str_metadata[str(key)]=str(value)
+ str_metadata[str(key)] = str(value)
params = {'is_public': 'true', 'disk_format': 'diskdump'}
self.image_client.register(name, location, params, str_metadata)
from image_creator.disk import Disk
from image_creator.util import get_command, FatalError, MD5
from image_creator.output.cli import SilentOutput, SimpleOutput, \
- OutputWthProgress
+ OutputWthProgress
from image_creator.os_type import os_cls
from image_creator.kamaki_wrapper import Kamaki, ClientError
import sys
else None
parser.add_option("-o", "--outfile", type="string", dest="outfile",
- default=None, action="callback", callback=check_writable_dir,
- help="dump image to FILE", metavar="FILE")
+ default=None, action="callback",
+ callback=check_writable_dir, help="dump image to FILE",
+ metavar="FILE")
parser.add_option("-f", "--force", dest="force", default=False,
- action="store_true", help="overwrite output files if they exist")
+ action="store_true",
+ help="overwrite output files if they exist")
parser.add_option("-s", "--silent", dest="silent", default=False,
- help="silent mode, only output errors", action="store_true")
+ help="silent mode, only output errors",
+ action="store_true")
parser.add_option("-u", "--upload", dest="upload", type="string",
- default=False, help="upload the image to pithos with name FILENAME",
- metavar="FILENAME")
+ default=False,
+ help="upload the image to pithos with name FILENAME",
+ metavar="FILENAME")
parser.add_option("-r", "--register", dest="register", type="string",
- default=False, help="register the image to ~okeanos as IMAGENAME",
- metavar="IMAGENAME")
+ default=False,
+ help="register the image to ~okeanos as IMAGENAME",
+ metavar="IMAGENAME")
parser.add_option("-a", "--account", dest="account", type="string",
- default=account,
- help="Use this ACCOUNT when uploading/registring images [Default: %s]"\
- % account)
+ default=account, help="Use this ACCOUNT when "
+ "uploading/registring images [Default: %s]" % account)
parser.add_option("-m", "--metadata", dest="metadata", default=[],
- help="Add custom KEY=VALUE metadata to the image", action="append",
- metavar="KEY=VALUE")
+ help="Add custom KEY=VALUE metadata to the image",
+ action="append", metavar="KEY=VALUE")
parser.add_option("-t", "--token", dest="token", type="string",
- default=token,
- help="Use this token when uploading/registring images [Default: %s]"\
- % token)
+ default=token, help="Use this token when "
+ "uploading/registring images [Default: %s]" % token)
parser.add_option("--print-sysprep", dest="print_sysprep", default=False,
- help="print the enabled and disabled system preparation operations "
- "for this input media", action="store_true")
+ help="print the enabled and disabled system preparation "
+ "operations for this input media", action="store_true")
parser.add_option("--enable-sysprep", dest="enabled_syspreps", default=[],
- help="run SYSPREP operation on the input media",
- action="append", metavar="SYSPREP")
+ help="run SYSPREP operation on the input media",
+ action="append", metavar="SYSPREP")
parser.add_option("--disable-sysprep", dest="disabled_syspreps",
- help="prevent SYSPREP operation from running on the input media",
- default=[], action="append", metavar="SYSPREP")
+ help="prevent SYSPREP operation from running on the "
+ "input media", default=[], action="append",
+ metavar="SYSPREP")
parser.add_option("--no-sysprep", dest="sysprep", default=True,
- help="don't perform system preperation", action="store_false")
+ help="don't perform system preperation",
+ action="store_false")
parser.add_option("--no-shrink", dest="shrink", default=True,
- help="don't shrink any partition", action="store_false")
+ help="don't shrink any partition", action="store_false")
options, args = parser.parse_args(input_args)
if not os.path.exists(options.source):
raise FatalError("Input media `%s' is not accessible" % options.source)
- if options.register and options.upload == False:
+ if options.register and not options.upload:
raise FatalError("You also need to set -u when -r option is set")
if options.upload and options.account is None:
raise FatalError("Image uploading cannot be performed. No ~okeanos "
- "account name is specified. Use -a to set an account name.")
+ "account name is specified. Use -a to set an account "
+ "name.")
if options.upload and options.token is None:
raise FatalError("Image uploading cannot be performed. No ~okeanos "
- "token is specified. User -t to set a token.")
+ "token is specified. User -t to set a token.")
meta = {}
for m in options.metadata:
try:
key, value = m.split('=', 1)
except ValueError:
- raise FatalError("Metadata option: `%s' is not in "\
- "KEY=VALUE format." % m)
+ raise FatalError("Metadata option: `%s' is not in "
+ "KEY=VALUE format." % m)
meta[key] = value
options.metadata = meta
def image_creator():
options = parse_options(sys.argv[1:])
- if options.outfile is None and not options.upload \
- and not options.print_sysprep:
- raise FatalError("At least one of `-o', `-u' or `--print-sysprep' " \
- "must be set")
+ if options.outfile is None and not options.upload and not \
+ options.print_sysprep:
+ raise FatalError("At least one of `-o', `-u' or `--print-sysprep' "
+ "must be set")
if options.silent:
out = SilentOutput()
else:
out = OutputWthProgress(True) if sys.stderr.isatty() else \
- SimpleOutput(False)
+ SimpleOutput(False)
title = 'snf-image-creator %s' % version
out.output(title)
out.output('=' * len(title))
if os.geteuid() != 0:
- raise FatalError("You must run %s as root" \
- % os.path.basename(sys.argv[0]))
+ raise FatalError("You must run %s as root"
+ % os.path.basename(sys.argv[0]))
if not options.force and options.outfile is not None:
for extension in ('', '.meta', '.md5sum'):
filename = "%s%s" % (options.outfile, extension)
if os.path.exists(filename):
raise FatalError("Output file %s exists "
- "(use --force to overwrite it)." % filename)
+ "(use --force to overwrite it)." % filename)
disk = Disk(options.source, out)
try:
checksum = md5.compute(snapshot, size)
metastring = '\n'.join(
- ['%s=%s' % (key, value) for (key, value) in metadata.items()])
+ ['%s=%s' % (key, value) for (key, value) in metadata.items()])
metastring += '\n'
if options.outfile is not None:
out.output('Dumping md5sum file...', False)
with open('%s.%s' % (options.outfile, 'md5sum'), 'w') as f:
- f.write('%s %s\n' % (checksum, \
- os.path.basename(options.outfile)))
+ f.write('%s %s\n' % (checksum,
+ os.path.basename(options.outfile)))
out.success('done')
# Destroy the device. We only need the snapshot from now on
kamaki = Kamaki(options.account, options.token, out)
with open(snapshot, 'rb') as f:
uploaded_obj = kamaki.upload(f, size, options.upload,
- "(1/4) Calculating block hashes",
- "(2/4) Uploading missing blocks")
+ "(1/4) Calculating block "
+ "hashes",
+ "(2/4) Uploading missing "
+ "blocks")
out.output("(3/4) Uploading metadata file...", False)
kamaki.upload(StringIO.StringIO(metastring),
remote_path="%s.%s" % (options.upload, 'meta'))
out.success('done')
out.output("(4/4) Uploading md5sum file...", False)
- md5sumstr = '%s %s\n' % (
- checksum, os.path.basename(options.upload))
+ md5sumstr = '%s %s\n' % (checksum,
+ os.path.basename(options.upload))
kamaki.upload(StringIO.StringIO(md5sumstr),
size=len(md5sumstr),
remote_path="%s.%s" % (options.upload, 'md5sum'))
module = None
classname = None
try:
- module = __import__("image_creator.os_type.%s"
- % distro, fromlist=['image_creator.os_type'])
+ module = __import__("image_creator.os_type.%s" % distro,
+ fromlist=['image_creator.os_type'])
classname = distro.capitalize()
except ImportError:
- module = __import__("image_creator.os_type.%s"
- % osfamily, fromlist=['image_creator.os_type'])
+ module = __import__("image_creator.os_type.%s" % osfamily,
+ fromlist=['image_creator.os_type'])
classname = osfamily.capitalize()
return getattr(module, classname)
def list_syspreps(self):
- objs = [getattr(self, name) for name in dir(self) \
- if not name.startswith('_')]
+ objs = [getattr(self, name) for name in dir(self)
+ if not name.startswith('_')]
return [x for x in objs if self._is_sysprep(x)]
self.out.output('Fixing acpid powerdown action')
powerbtn_action = '#!/bin/sh\n\nPATH=/sbin:/bin:/usr/bin\n' \
- 'shutdown -h now \"Power button pressed\"\n'
+ 'shutdown -h now "Power button pressed"\n'
events_dir = '/etc/acpi/events'
if not self.g.is_dir(events_dir):
if action:
if not self.g.is_file(action):
self.out.warn("Acpid action file: %s does not exist" %
- action)
+ action)
return
- self.g.copy_file_to_file(action, \
- "%s.orig.snf-image-creator-%d" % (action, time.time()))
+ self.g.copy_file_to_file(action,
+ "%s.orig.snf-image-creator-%d" %
+ (action, time.time()))
self.g.write(action, powerbtn_action)
return
else:
- self.out.warn(
- "Acpid event file %s does not contain and action")
+ self.out.warn("Acpid event file %s does not contain and "
+ "action")
return
elif event.strip() == ".*":
- self.out.warn(
- "Found action `.*'. Don't know how to handle this. " \
- "Please edit `%s' image file manually to make the " \
- "system immediatelly shutdown when an power button acpi " \
- "event occures." % action.strip().split()[0])
+ self.out.warn("Found action `.*'. Don't know how to handle "
+ "this. Please edit `%s' image file manually to "
+ "make the system immediatelly shutdown when an "
+ "power button acpi event occures." %
+ action.strip().split()[0])
return
@sysprep()
"""
if print_header:
- self.out.output(
- 'Replacing fstab & grub non-persistent device appearences')
+ self.out.output("Replacing fstab & grub non-persistent device "
+ "appearences")
# convert all devices in fstab to persistent
persistent_root = self._persistent_fstab()
# In slackware the metadata about installed packages are
# stored in /var/log/packages. Clearing all /var/log files
# will destroy the package management system.
- self.foreach_file('/var/log', self.g.truncate, ftype='r', \
- exclude='/var/log/packages')
+ self.foreach_file('/var/log', self.g.truncate, ftype='r',
+ exclude='/var/log/packages')
# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
for app in apps:
if app['app_name'] == 'kubuntu-desktop':
self.meta['OS'] = 'kubuntu'
- self.meta['DESCRIPTION'] = \
- self.meta['DESCRIPTION'].replace('Ubuntu', 'Kubuntu')
+ descr = self.meta['DESCRIPTION'].replace('Ubuntu', 'Kubuntu')
+ self.meta['DESCRIPTION'] = descr
break
# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
"""Remove all user accounts with id greater than 1000"""
if print_header:
- self.out.output(
- 'Removing all user accounts with id greater than 1000')
+ self.out.output("Removing all user accounts with id greater than "
+ "1000")
# Remove users from /etc/passwd
passwd = []
"""Remove all passwords and lock all user accounts"""
if print_header:
- self.out.output(
- 'Cleaning up passwords & locking all user accounts')
+ self.out.output("Cleaning up passwords & locking all user "
+ "accounts")
shadow = []
homedirs = ['/root'] + self.ls('/home/')
if print_header:
- self.out.output('Removing sensitive user data under %s' % " ".
- join(homedirs))
+ self.out.output("Removing sensitive user data under %s" %
+ " ".join(homedirs))
for homedir in homedirs:
for data in self.sensitive_userdata: