--- /dev/null
+Copyright 2012, 2013 GRNET S.A. All rights reserved.
+
+Redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following
+conditions are met:
+
+ 1. Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the following
+ disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and
+documentation are those of the authors and should not be
+interpreted as representing official policies, either expressed
+or implied, of GRNET S.A.
^^^^^^^^
snf-image-creator is a simple command-line tool for creating OS images. The
-original media from which the image is created, can be a block device or a
-regular file that represents a hard disk. Given a media file, snf-image-creator
-will create a snapshot for it and will run a number of system preparation
-operations on the snapshot, before the image is created.
+original media, from which the image is created, can be a block device or a
+regular file that represents a hard disk.
Snapshotting
============
snf-image-creator works on snapshots of the original media. Any changes made by
the program do not affect the original media.
-Preparation
-===========
+Image Preparation
+=================
-Some of the system preparation operations are OS specific. snf-image-creator
-will use heuristics to detect the OS of the media and determine which
-operations should perform on it. The main purpose of running them is to:
+During the image creation, a number of system preparation operations are
+applied on the media snapshot. Some of those are OS specific. snf-image-creator
+will use heuristics to detect the OS and determine which operations to apply.
+Those operations will:
* Shrink the image
* Clear out sensitive user data (passwords, ssh keys, history files, etc.)
Creation
========
-The program can either dump the image file locally or directly upload it to
-pithos and register it with `okeanos <http://www.okeanos.grnet.gr>`_.
+The program can either dump the image file locally or use
+`kamaki <https://code.grnet.gr/projects/kamaki>`_ to directly upload and
+register it on a `Synnefo <https://code.grnet.gr/projects/synnefo>`_
+deployment.
Image Format
============
-The images the program creates are in diskdump format. This is the recommended
-format for `snf-image <https://code.grnet.gr/projects/snf-image>`_, the Ganeti
-OS Definition used by `Synnefo <https://code.grnet.gr/projects/synnefo>`_.
+The extracted images are in diskdump format. This is the recommended format for
+`snf-image <https://code.grnet.gr/projects/snf-image>`_, the Ganeti OS
+Definition used by `Synnefo <https://code.grnet.gr/projects/synnefo>`_.
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
-__version__ = '0.1'
+__version__ = '0.1.1'
# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
--- /dev/null
+# Copyright 2012 GRNET S.A. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and
+# documentation are those of the authors and should not be
+# interpreted as representing official policies, either expressed
+# or implied, of GRNET S.A.
+
+import os
+import re
+import tempfile
+from collections import namedtuple
+
+import parted
+
+from image_creator.rsync import Rsync
+from image_creator.util import get_command
+from image_creator.util import FatalError
+from image_creator.util import try_fail_repeat
+
+findfs = get_command('findfs')
+dd = get_command('dd')
+dmsetup = get_command('dmsetup')
+losetup = get_command('losetup')
+mount = get_command('mount')
+umount = get_command('umount')
+blkid = get_command('blkid')
+
+MKFS_OPTS = {'ext2': ['-F'],
+ 'ext3': ['-F'],
+ 'ext4': ['-F'],
+ 'reiserfs': ['-ff'],
+ 'btrfs': [],
+ 'minix': [],
+ 'xfs': ['-f'],
+ 'jfs': ['-f'],
+ 'ntfs': ['-F'],
+ 'msdos': [],
+ 'vfat': []}
+
+
+class BundleVolume(object):
+ """This class can be used to create an image out of the running system"""
+
+    def __init__(self, out, meta):
+        """Create an instance of the BundleVolume class.
+
+        out: object used for progress/status output
+        meta: image metadata dictionary, updated in-place by the class
+        """
+        self.out = out
+        self.meta = meta
+
+        self.out.output('Searching for root device ...', False)
+        root = self._get_root_partition()
+
+        # fstab may reference the root device by UUID or LABEL; resolve
+        # it to a real device path first.
+        if root.startswith("UUID=") or root.startswith("LABEL="):
+            root = findfs(root).stdout.strip()
+
+        if not re.match('/dev/[hsv]d[a-z][1-9]*$', root):
+            raise FatalError("Don't know how to handle root device: %s" % root)
+
+        out.success(root)
+
+        # Strip the partition number to get the whole-disk device node.
+        disk_file = re.split('[0-9]', root)[0]
+        device = parted.Device(disk_file)
+        self.disk = parted.Disk(device)
+
+    def _read_fstable(self, f):
+        """Yield a FileSystemTableEntry namedtuple for every valid entry
+        in the fstab-like file f (e.g. /etc/fstab or /proc/mounts).
+        """
+
+        if not os.path.isfile(f):
+            raise FatalError("Unable to open: `%s'. File is missing." % f)
+
+        FileSystemTableEntry = namedtuple('FileSystemTableEntry',
+                                          'dev mpoint fs opts freq passno')
+        with open(f) as table:
+            for line in iter(table):
+                # Strip comments; a valid entry has exactly 6 fields.
+                entry = line.split('#')[0].strip().split()
+                if len(entry) != 6:
+                    continue
+                yield FileSystemTableEntry(*entry)
+
+ def _get_root_partition(self):
+ for entry in self._read_fstable('/etc/fstab'):
+ if entry.mpoint == '/':
+ return entry.dev
+
+ raise FatalError("Unable to find root device in /etc/fstab")
+
+ def _is_mpoint(self, path):
+ for entry in self._read_fstable('/proc/mounts'):
+ if entry.mpoint == path:
+ return True
+ return False
+
+ def _get_mount_options(self, device):
+ for entry in self._read_fstable('/proc/mounts'):
+ if not entry.dev.startswith('/'):
+ continue
+
+ if os.path.realpath(entry.dev) == os.path.realpath(device):
+ return entry
+
+ return None
+
+    def _create_partition_table(self, image):
+        """Copy the host disk's partition table into the image: the MBR
+        (plus the gap before the first partition) and any extended boot
+        records. Only msdos (MBR) partition tables are supported.
+        """
+        if self.disk.type != 'msdos':
+            raise FatalError('Only msdos partition tables are supported')
+
+        # Copy the MBR and the space between the MBR and the first partition.
+        # In Grub version 1 Stage 1.5 is located there.
+        first_sector = self.disk.getPrimaryPartitions()[0].geometry.start
+
+        dd('if=%s' % self.disk.device.path, 'of=%s' % image,
+           'bs=%d' % self.disk.device.sectorSize,
+           'count=%d' % first_sector, 'conv=notrunc')
+
+        # Create the Extended boot records (EBRs) in the image
+        extended = self.disk.getExtendedPartition()
+        if not extended:
+            return
+
+        # Extended boot records precede the logical partitions they describe
+        logical = self.disk.getLogicalPartitions()
+        start = extended.geometry.start
+        for i in range(len(logical)):
+            end = logical[i].geometry.start - 1
+            dd('if=%s' % self.disk.device.path, 'of=%s' % image,
+               'count=%d' % (end - start + 1), 'conv=notrunc',
+               'seek=%d' % start, 'skip=%d' % start)
+            start = logical[i].geometry.end + 1
+
+    def _get_partitions(self, disk):
+        """Return a list of Partition namedtuples
+        (num, start, end, type, fs) describing every partition on the
+        given pyparted disk. Sector units for start/end.
+        """
+        Partition = namedtuple('Partition', 'num start end type fs')
+
+        partitions = []
+        for p in disk.partitions:
+            num = p.number
+            start = p.geometry.start
+            end = p.geometry.end
+            ptype = p.type
+            # Partitions without a file system (e.g. extended) get ''.
+            fs = p.fileSystem.type if p.fileSystem is not None else ''
+            partitions.append(Partition(num, start, end, ptype, fs))
+
+        return partitions
+
+ def _shrink_partitions(self, image):
+
+ new_end = self.disk.device.getLength()
+
+ image_dev = parted.Device(image)
+ image_disk = parted.Disk(image_dev)
+
+ is_extended = lambda p: p.type == parted.PARTITION_EXTENDED
+ is_logical = lambda p: p.type == parted.PARTITION_LOGICAL
+
+ partitions = self._get_partitions(self.disk)
+
+ last = partitions[-1]
+ if last.fs == 'linux-swap(v1)':
+ MB = 2 ** 20
+ size = (last.end - last.start + 1) * self.disk.device.sectorSize
+ self.meta['SWAP'] = "%d:%s" % (last.num, (size + MB - 1) // MB)
+
+ image_disk.deletePartition(
+ image_disk.getPartitionBySector(last.start))
+ image_disk.commit()
+
+ if is_logical(last) and last.num == 5:
+ extended = image_disk.getExtendedPartition()
+ image_disk.deletePartition(extended)
+ image_disk.commit()
+ partitions.remove(filter(is_extended, partitions)[0])
+
+ partitions.remove(last)
+ last = partitions[-1]
+
+ # Leave 2048 blocks at the end
+ new_end = last.end + 2048
+
+ mount_options = self._get_mount_options(
+ self.disk.getPartitionBySector(last.start).path)
+ if mount_options is not None:
+ stat = os.statvfs(mount_options.mpoint)
+ # Shrink the last partition. The new size should be the size of the
+ # occupied blocks
+ blcks = stat.f_blocks - stat.f_bavail
+ new_size = (blcks * stat.f_frsize) // self.disk.device.sectorSize
+
+ # Add 10% just to be on the safe side
+ part_end = last.start + (new_size * 11) // 10
+ # Align to 2048
+ part_end = ((part_end + 2047) // 2048) * 2048
+
+ image_disk.setPartitionGeometry(
+ image_disk.getPartitionBySector(last.start),
+ parted.Constraint(device=image_disk.device),
+ start=last.start, end=part_end)
+ image_disk.commit()
+
+ # Parted may have changed this for better alignment
+ part_end = image_disk.getPartitionBySector(last.start).geometry.end
+ last = last._replace(end=part_end)
+ partitions[-1] = last
+
+ # Leave 2048 blocks at the end.
+ new_end = part_end + 2048
+
+ if last.type == parted.PARTITION_LOGICAL:
+ # Fix the extended partition
+ extended = disk.getExtendedPartition()
+
+ image_disk.setPartitionGeometry(
+ extended, parted.Constraint(device=img_dev),
+ ext.geometry.start, end=last.end)
+ image_disk.commit()
+
+ image_dev.destroy()
+ return new_end
+
+ def _map_partition(self, dev, num, start, end):
+ name = os.path.basename(dev)
+ tablefd, table = tempfile.mkstemp()
+ try:
+ size = end - start + 1
+ os.write(tablefd, "0 %d linear %s %d" % (size, dev, start))
+ dmsetup('create', "%sp%d" % (name, num), table)
+ finally:
+ os.unlink(table)
+
+ return "/dev/mapper/%sp%d" % (name, num)
+
+    def _unmap_partition(self, dev):
+        """Tear down a device-mapper node created by _map_partition.
+        dmsetup remove can transiently fail with 'device busy', hence
+        the retrying wrapper.
+        """
+        if not os.path.exists(dev):
+            return
+
+        try_fail_repeat(dmsetup, 'remove', dev.split('/dev/mapper/')[1])
+
+    def _mount(self, target, devs):
+        """Mount each (device, mountpoint) pair of devs under the
+        target directory. Sorting by mount point guarantees parents
+        are mounted before their descendants.
+        """
+        devs.sort(key=lambda d: d[1])
+        for dev, mpoint in devs:
+            absmpoint = os.path.abspath(target + mpoint)
+            if not os.path.exists(absmpoint):
+                os.makedirs(absmpoint)
+            mount(dev, absmpoint)
+
+    def _umount_all(self, target):
+        """Unmount every file system mounted under target, deepest
+        mount points first.
+        """
+        mpoints = []
+        for entry in self._read_fstable('/proc/mounts'):
+            if entry.mpoint.startswith(os.path.abspath(target)):
+                mpoints.append(entry.mpoint)
+
+        mpoints.sort()
+        # Reverse sorted order so /a/b is unmounted before /a.
+        for mpoint in reversed(mpoints):
+            try_fail_repeat(umount, mpoint)
+
+    def _to_exclude(self):
+        """Return a list of glob patterns for paths whose contents must
+        not be copied into the image: /tmp, /var/tmp and every mount
+        point backed by a non-local file system (nfs, proc, sysfs,
+        tmpfs, ...).
+        """
+        excluded = ['/tmp', '/var/tmp']
+        local_filesystems = MKFS_OPTS.keys() + ['rootfs']
+        for entry in self._read_fstable('/proc/mounts'):
+            if entry.fs in local_filesystems:
+                continue
+
+            mpoint = entry.mpoint
+            if mpoint in excluded:
+                continue
+
+            # If this mount point is an ancestor of already-excluded
+            # paths, replace those descendants with the mount point.
+            descendants = filter(
+                lambda p: p.startswith(mpoint + '/'), excluded)
+            if len(descendants):
+                for d in descendants:
+                    excluded.remove(d)
+                excluded.append(mpoint)
+                continue
+
+            # Skip this mount point if one of its ancestors is already
+            # excluded.
+            dirname = mpoint
+            basename = ''
+            found_ancestor = False
+            while dirname != '/':
+                (dirname, basename) = os.path.split(dirname)
+                if dirname in excluded:
+                    found_ancestor = True
+                    break
+
+            if not found_ancestor:
+                excluded.append(mpoint)
+
+        # Exclude the directories' contents, not the directories.
+        return map(lambda d: d + "/*", excluded)
+
+    def _replace_uuids(self, target, new_uuid):
+        """Rewrite fstab and grub configuration files under target,
+        replacing the UUIDs of the original file systems with those of
+        the newly created ones.
+
+        new_uuid maps partition number -> new file system UUID.
+        """
+        files = ['/etc/fstab',
+                 '/boot/grub/grub.cfg',
+                 '/boot/grub/menu.lst',
+                 '/boot/grub/grub.conf']
+
+        # Map partition number -> UUID of the original file system.
+        orig = dict(map(
+            lambda p: (
+                p.number,
+                blkid('-s', 'UUID', '-o', 'value', p.path).stdout.strip()),
+            self.disk.partitions))
+
+        for f in map(lambda f: target + f, files):
+
+            if not os.path.exists(f):
+                continue
+
+            # Read everything first, then rewrite the file in place.
+            with open(f, 'r') as src:
+                lines = src.readlines()
+            with open(f, 'w') as dest:
+                for line in lines:
+                    for i, uuid in new_uuid.items():
+                        line = re.sub(orig[i], uuid, line)
+                    dest.write(line)
+
+ def _create_filesystems(self, image):
+
+ filesystem = {}
+ for p in self.disk.partitions:
+ filesystem[p.number] = self._get_mount_options(p.path)
+
+ partitions = self._get_partitions(parted.Disk(parted.Device(image)))
+ unmounted = filter(lambda p: filesystem[p.num] is None, partitions)
+ mounted = filter(lambda p: filesystem[p.num] is not None, partitions)
+
+ # For partitions that are not mounted right now, we can simply dd them
+ # into the image.
+ for p in unmounted:
+ dd('if=%s' % self.disk.device.path, 'of=%s' % image,
+ 'count=%d' % (p.end - p.start + 1), 'conv=notrunc',
+ 'seek=%d' % p.start, 'skip=%d' % p.start)
+
+ loop = str(losetup('-f', '--show', image)).strip()
+ mapped = {}
+ try:
+ for p in mounted:
+ i = p.num
+ mapped[i] = self._map_partition(loop, i, p.start, p.end)
+
+ new_uuid = {}
+ # Create the file systems
+ for i, dev in mapped.iteritems():
+ fs = filesystem[i].fs
+ self.out.output('Creating %s filesystem on partition %d ... ' %
+ (fs, i), False)
+ get_command('mkfs.%s' % fs)(*(MKFS_OPTS[fs] + [dev]))
+ self.out.success('done')
+ new_uuid[i] = blkid(
+ '-s', 'UUID', '-o', 'value', dev).stdout.strip()
+
+ target = tempfile.mkdtemp()
+ try:
+ absmpoints = self._mount(target,
+ [(mapped[i], filesystem[i].mpoint)
+ for i in mapped.keys()])
+ exclude = self._to_exclude() + [image]
+
+ rsync = Rsync(self.out)
+
+ # Excluded paths need to be relative to the source
+ for excl in map(lambda p: os.path.relpath(p, '/'), exclude):
+ rsync.exclude(excl)
+
+ rsync.archive().hard_links().xattrs().sparse().acls()
+ rsync.run('/', target, 'host', 'tmp image')
+
+ # We need to replace the old UUID referencies with the new
+ # ones in grub configuration files and /etc/fstab for file
+ # systems that have been recreated.
+ self._replace_uuids(target, new_uuid)
+
+ finally:
+ self._umount_all(target)
+ os.rmdir(target)
+ finally:
+ for dev in mapped.values():
+ self._unmap_partition(dev)
+ losetup('-d', loop)
+
+    def create_image(self, image):
+        """Given an image filename, this method will create an image out of the
+        running system. Returns the image filename.
+        """
+
+        size = self.disk.device.getLength() * self.disk.device.sectorSize
+
+        # Create sparse file to host the image
+        fd = os.open(image, os.O_WRONLY | os.O_CREAT)
+        try:
+            os.ftruncate(fd, size)
+        finally:
+            os.close(fd)
+
+        self._create_partition_table(image)
+
+        end_sector = self._shrink_partitions(image)
+
+        # Size of the shrunk image in bytes.
+        size = (end_sector + 1) * self.disk.device.sectorSize
+
+        # Truncate image to the new size.
+        fd = os.open(image, os.O_RDWR)
+        try:
+            os.ftruncate(fd, size)
+        finally:
+            os.close(fd)
+
+        # Check if the available space is enough to host the image
+        dirname = os.path.dirname(image)
+        self.out.output("Examining available space in %s ..." % dirname, False)
+        stat = os.statvfs(dirname)
+        available = stat.f_bavail * stat.f_frsize
+        if available <= size:
+            raise FatalError('Not enough space in %s to host the image' %
+                             dirname)
+        self.out.success("sufficient")
+
+        self._create_filesystems(image)
+
+        return image
+
+# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
import dialog
import sys
import os
+import stat
import textwrap
import signal
import optparse
def select_file(d, media):
- root = os.sep
+ default = os.getcwd() + os.sep
while 1:
if media is not None:
if not os.path.exists(media):
d.msgbox("The file `%s' you choose does not exist." % media,
width=SMALL_WIDTH)
else:
- break
+ mode = os.stat(media).st_mode
+ if not stat.S_ISDIR(mode):
+ break
+ default = media
- (code, media) = d.fselect(root, 10, 50,
- title="Please select input media")
+ (code, media) = d.fselect(default, 10, 60, extra_button=1,
+ title="Please select an input media.",
+ extra_label="Bundle Host")
if code in (d.DIALOG_CANCEL, d.DIALOG_ESC):
if confirm_exit(d, "You canceled the media selection dialog box."):
sys.exit(0)
else:
media = None
continue
+ elif code == d.DIALOG_EXTRA:
+ return '/'
return media
"(1/4) Calculating block hashes",
"(2/4) Uploading missing blocks")
- out.output("(3/4) Uploading metadata file...", False)
+ out.output("(3/4) Uploading metadata file ...", False)
kamaki.upload(StringIO.StringIO(metastring), size=len(metastring),
remote_path="%s.%s" % (name, 'meta'))
out.success('done')
- out.output("(4/4) Uploading md5sum file...", False)
+ out.output("(4/4) Uploading md5sum file ...", False)
md5sumstr = '%s %s\n' % (session['checksum'], name)
kamaki.upload(StringIO.StringIO(md5sumstr), size=len(md5sumstr),
remote_path="%s.%s" % (name, 'md5sum'))
out.success('done')
out.output()
- out.output('Registering image with ~okeanos...', False)
+ out.output('Registering image with ~okeanos ...', False)
kamaki.register(wizard['ImageName'], pithos_file, metadata)
out.success('done')
out.output()
from image_creator.util import get_command
from image_creator.util import FatalError
+from image_creator.util import try_fail_repeat
from image_creator.gpt import GPTPartitionTable
+from image_creator.bundle_volume import BundleVolume
+
import stat
import os
import tempfile
import re
import sys
+import uuid
import guestfs
-import time
from sendfile import sendfile
def __init__(self, source, output):
"""Create a new Disk instance out of a source media. The source
- media can be an image file, a block device or a directory."""
+ media can be an image file, a block device or a directory.
+ """
self._cleanup_jobs = []
self._devices = []
self.source = source
self.out = output
+ self.meta = {}
def _add_cleanup(self, job, *args):
self._cleanup_jobs.append((job, args))
def _losetup(self, fname):
loop = losetup('-f', '--show', fname)
loop = loop.strip() # remove the new-line char
- self._add_cleanup(losetup, '-d', loop)
+ self._add_cleanup(try_fail_repeat, losetup, '-d', loop)
return loop
def _dir_to_disk(self):
- raise FatalError("Using a directory as media source is not supported "
- "yet!")
+ if self.source == '/':
+ bundle = BundleVolume(self.out, self.meta)
+ image = '/var/tmp/%s.diskdump' % uuid.uuid4().hex
+
+ def check_unlink(path):
+ if os.path.exists(path):
+ os.unlink(path)
+
+ self._add_cleanup(check_unlink, image)
+ bundle.create_image(image)
+ return self._losetup(image)
+ raise FatalError("Using a directory as media source is supported")
def cleanup(self):
"""Cleanup internal data. This needs to be called before the
instance.
"""
- self.out.output("Examining source media `%s'..." % self.source, False)
+ self.out.output("Examining source media `%s' ..." % self.source, False)
sourcedev = self.source
mode = os.stat(self.source).st_mode
if stat.S_ISDIR(mode):
self.out.success('looks like a directory')
- return self._losetup(self._dir_to_disk())
+ return self._dir_to_disk()
elif stat.S_ISREG(mode):
self.out.success('looks like an image file')
sourcedev = self._losetup(self.source)
os.write(tablefd, "0 %d snapshot %s %s n 8" %
(int(size), sourcedev, cowdev))
dmsetup('create', snapshot, table)
- self._add_cleanup(dmsetup, 'remove', snapshot)
- # Sometimes dmsetup remove fails with Device or resource busy,
- # although everything is cleaned up and the snapshot is not
- # used by anyone. Add a 2 seconds delay to be on the safe side.
- self._add_cleanup(time.sleep, 2)
+ self._add_cleanup(try_fail_repeat, dmsetup, 'remove', snapshot)
finally:
os.unlink(table)
as created by the device-mapper.
"""
- def __init__(self, device, output, bootable=True):
+ def __init__(self, device, output, bootable=True, meta={}):
"""Create a new DiskDevice."""
self.real_device = device
self.out = output
self.bootable = bootable
+ self.meta = meta
self.progress_bar = None
self.guestfs_device = None
self.size = 0
- self.meta = {}
self.g = guestfs.GuestFS()
self.g.add_drive_opts(self.real_device, readonly=0)
def enable(self):
"""Enable a newly created DiskDevice"""
- self.progressbar = self.out.Progress(100, "Launching helper VM",
- "percent")
- eh = self.g.set_event_callback(self.progress_callback,
- guestfs.EVENT_PROGRESS)
+
+ self.out.output('Launching helper VM (may take a while) ...', False)
+ # self.progressbar = self.out.Progress(100, "Launching helper VM",
+ # "percent")
+ # eh = self.g.set_event_callback(self.progress_callback,
+ # guestfs.EVENT_PROGRESS)
self.g.launch()
self.guestfs_enabled = True
- self.g.delete_event_callback(eh)
- self.progressbar.success('done')
- self.progressbar = None
+ # self.g.delete_event_callback(eh)
+ # self.progressbar.success('done')
+ # self.progressbar = None
+ self.out.success('done')
- self.out.output('Inspecting Operating System...', False)
+ self.out.output('Inspecting Operating System ...', False)
roots = self.g.inspect_os()
if len(roots) == 0:
raise FatalError("No operating system found")
# Close the guestfs handler if open
self.g.close()
- def progress_callback(self, ev, eh, buf, array):
- position = array[2]
- total = array[3]
-
- self.progressbar.goto((position * 100) // total)
+# def progress_callback(self, ev, eh, buf, array):
+# position = array[2]
+# total = array[3]
+#
+# self.progressbar.goto((position * 100) // total)
def mount(self, readonly=False):
"""Mount all disk partitions in a correct order."""
mount = self.g.mount_ro if readonly else self.g.mount
msg = " read-only" if readonly else ""
- self.out.output("Mounting the media%s..." % msg, False)
+ self.out.output("Mounting the media%s ..." % msg, False)
mps = self.g.inspect_get_mountpoints(self.root)
# Sort the keys to mount the fs in a correct order.
raise FatalError(msg)
is_extended = lambda p: \
- self.g.part_get_mbr_id(self.guestfs_device, p['part_num']) == 5
+ self.g.part_get_mbr_id(self.guestfs_device, p['part_num']) \
+ in (0x5, 0xf)
is_logical = lambda p: \
- self.meta['PARTITION_TABLE'] != 'msdos' and p['part_num'] > 4
+ self.meta['PARTITION_TABLE'] == 'msdos' and p['part_num'] > 4
partitions = self.g.part_list(self.guestfs_device)
last_partition = partitions[-1]
if is_logical(last_partition):
# The disk contains extended and logical partitions....
- extended = [p for p in partitions if is_extended(p)][0]
+ extended = filter(is_extended, partitions)[0]
last_primary = [p for p in partitions if p['part_num'] <= 4][-1]
# check if extended is the last primary partition
self.meta['PARTITION_TABLE'] == 'msdos' and p['part_num'] > 4
is_extended = lambda p: \
self.meta['PARTITION_TABLE'] == 'msdos' and \
- self.g.part_get_mbr_id(self.guestfs_device, p['part_num']) == 5
+ self.g.part_get_mbr_id(self.guestfs_device, p['part_num']) \
+ in (0x5, 0xf)
part_add = lambda ptype, start, stop: \
self.g.part_add(self.guestfs_device, ptype, start, stop)
MB = 2 ** 20
- self.out.output("Shrinking image (this may take a while)...", False)
+ self.out.output("Shrinking image (this may take a while) ...", False)
sector_size = self.g.blockdev_getss(self.guestfs_device)
self.g.resize2fs_M(part_dev)
out = self.g.tune2fs_l(part_dev)
- block_size = int(
- filter(lambda x: x[0] == 'Block size', out)[0][1])
- block_cnt = int(
- filter(lambda x: x[0] == 'Block count', out)[0][1])
+ block_size = int(filter(lambda x: x[0] == 'Block size', out)[0][1])
+ block_cnt = int(filter(lambda x: x[0] == 'Block count', out)[0][1])
start = last_part['part_start'] / sector_size
end = start + (block_size * block_cnt) / sector_size - 1
'num': partition['part_num'],
'start': partition['part_start'] / sector_size,
'end': partition['part_end'] / sector_size,
- 'id': part_get_(partition['part_num']),
+ 'id': part_get_id(partition['part_num']),
'bootable': part_get_bootable(partition['part_num'])
})
logical[-1]['end'] = end # new end after resize
# Recreate the extended partition
- extended = [p for p in partitions if self._is_extended(p)][0]
+ extended = filter(is_extended, partitions)[0]
part_del(extended['part_num'])
- part_add('e', extended['part_start'], end)
+ part_add('e', extended['part_start'] / sector_size, end)
# Create all the logical partitions back
for l in logical:
while left > 0:
length = min(left, blocksize)
_, sent = sendfile(dst.fileno(), src.fileno(), offset,
- length)
+ length)
offset += sent
left -= sent
progressbar.goto((size - left) // MB)
if options.outfile is not None:
dev.dump(options.outfile)
- out.output('Dumping metadata file...', False)
+ out.output('Dumping metadata file ...', False)
with open('%s.%s' % (options.outfile, 'meta'), 'w') as f:
f.write(metastring)
out.success('done')
- out.output('Dumping md5sum file...', False)
+ out.output('Dumping md5sum file ...', False)
with open('%s.%s' % (options.outfile, 'md5sum'), 'w') as f:
f.write('%s %s\n' % (checksum,
os.path.basename(options.outfile)))
size=len(metastring),
remote_path="%s.%s" % (options.upload, 'meta'))
out.success('done')
- out.output("(4/4) Uploading md5sum file...", False)
+ out.output("(4/4) Uploading md5sum file ...", False)
md5sumstr = '%s %s\n' % (checksum,
os.path.basename(options.upload))
kamaki.upload(StringIO.StringIO(md5sumstr),
out.output()
if options.register:
- out.output('Registering image with ~okeanos...', False)
+ out.output('Registering image with ~okeanos ...', False)
kamaki.register(options.register, uploaded_obj, metadata)
out.success('done')
out.output()
from image_creator.os_type.unix import Unix, sysprep
-class Hard(Unix):
+class Hurd(Unix):
pass
# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
sensitive_userdata = [
'.bash_history',
'.gnupg',
- '.ssh'
+ '.ssh',
+ '.kamakirc',
+ '.kamaki.history'
]
def __init__(self, rootdev, ghandler, output):
self.start()
def success(self, result):
- self.output.output("\r%s...\033[K" % self.title, False)
+ self.output.output("\r%s ...\033[K" % self.title, False)
self.output.success(result)
# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
def __init__(self, size, title, bar_type='default'):
self.output.size = size
self.bar_type = bar_type
- self.output.msg = "%s..." % title
+ self.output.msg = "%s ..." % title
self.goto(0)
def _postfix(self):
--- /dev/null
+# Copyright 2012 GRNET S.A. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and
+# documentation are those of the authors and should not be
+# interpreted as representing official policies, either expressed
+# or implied, of GRNET S.A.
+
+import subprocess
+import time
+import signal
+
+from image_creator.util import FatalError
+
+
+class Rsync:
+    """Wrapper class for the rsync command"""
+
+    def __init__(self, output):
+        """Create an instance. output is used for progress reporting."""
+        self._out = output
+        self._exclude = []
+        self._options = ['-v']
+
+    def archive(self):
+        """Enable the archive option"""
+        self._options.append('-a')
+        return self
+
+    def xattrs(self):
+        """Preserve extended attributes"""
+        self._options.append('-X')
+        return self
+
+    def hard_links(self):
+        """Preserve hard links"""
+        self._options.append('-H')
+        return self
+
+    def acls(self):
+        """Preserve ACLs"""
+        self._options.append('-A')
+        return self
+
+    def sparse(self):
+        """Handle sparse files efficiently"""
+        self._options.append('-S')
+        return self
+
+    def exclude(self, pattern):
+        """Add an exclude pattern"""
+        self._exclude.append(pattern)
+        return self
+
+    def reset(self):
+        """Reset all rsync options"""
+        self._exclude = []
+        self._options = ['-v']
+
+    def run(self, src, dest, slabel='source', dlabel='destination'):
+        """Copy src to dest with rsync, showing a progress bar.
+
+        slabel/dlabel are human-readable names for the source and
+        destination, used only in progress messages. A dry listing run
+        is performed first to count the files to be transferred.
+        """
+        cmd = []
+        cmd.append('rsync')
+        cmd.extend(self._options)
+        for i in self._exclude:
+            cmd.extend(['--exclude', i])
+
+        self._out.output("Calculating total number of %s files ..." % slabel,
+                         False)
+
+        # If you don't specify a destination, rsync will list the source files.
+        dry_run = subprocess.Popen(cmd + [src], shell=False,
+                                   stdout=subprocess.PIPE, bufsize=0)
+        try:
+            total = 0
+            # One line of verbose output per file.
+            for line in iter(dry_run.stdout.readline, b''):
+                total += 1
+        finally:
+            dry_run.communicate()
+            if dry_run.returncode != 0:
+                raise FatalError("rsync failed")
+
+        self._out.success("%d" % total)
+
+        progress = self._out.Progress(total, "Copying %s files to %s" %
+                                      (slabel, dlabel))
+        run = subprocess.Popen(cmd + [src, dest], shell=False,
+                               stdout=subprocess.PIPE, bufsize=0)
+        try:
+            t = time.time()
+            i = 0
+            for line in iter(run.stdout.readline, b''):
+                i += 1
+                current = time.time()
+                # Throttle progress updates to at most ~10 per second.
+                if current - t > 0.1:
+                    t = current
+                    progress.goto(i)
+
+            progress.success('done')
+
+        finally:
+            # Reap the child even if we raised above; kill it first if
+            # it is still running.
+            run.poll()
+            if run.returncode is None:
+                run.send_signal(signal.SIGHUP)
+            run.communicate()
+            if run.returncode != 0:
+                raise FatalError("rsync failed")
+
+
+# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
import sys
import sh
import hashlib
+import time
class FatalError(Exception):
return find_sbin_command(command, e)
+def try_fail_repeat(command, *args):
+
+ times = (0.1, 0.5, 1, 2)
+ i = iter(times)
+ while True:
+ try:
+ command(*args)
+ return
+ except sh.ErrorReturnCode:
+ try:
+ wait = i.next()
+ except StopIteration:
+ break
+ time.sleep(wait)
+
+ raise FatalError("Command: `%s %s' failed" % (command, " ".join(args)))
+
+
class MD5:
def __init__(self, output):
self.out = output
def compute(self, filename, size):
-
MB = 2 ** 20
BLOCKSIZE = 4 * MB # 4MB