author    jianzhong.fang <jz.fang@samsung.com>    2016-05-17 09:19:07 +0800
committer SoonKyu Park <sk7.park@samsung.com>     2016-06-16 12:58:10 +0900
commit    69e2f2491838c9e51bf8ccaabfd07bb78d70d7fa (patch)
tree      3533ea08ccf0839efb6f5e0473c2dea26dcc9c3d
parent    43f037fd958996e76a900985eedb23530ba9579d (diff)
Revert "Drop mic raw image format support"
Change-Id: I2752f5c804f215ba96b5cfbabfefb1c383bb5803
-rw-r--r--   doc/man.rst                    11
-rw-r--r--   doc/usage.rst                  15
-rwxr-xr-x   mic/imager/raw.py             602
-rwxr-xr-x   mic/utils/BmapCreate.py       354
-rwxr-xr-x   mic/utils/Filemap.py          520
-rwxr-xr-x   plugins/imager/raw_plugin.py  260
-rwxr-xr-x   tools/mic                      13
7 files changed, 1772 insertions, 3 deletions
diff --git a/doc/man.rst b/doc/man.rst
index c58e2f6..aeba63d 100644
--- a/doc/man.rst
+++ b/doc/man.rst
@@ -29,6 +29,7 @@ Subcommands:
| auto auto detect image type from magic header
| fs create fs image, which is also chroot directory
| loop create loop image, including multi-partitions
+ | raw create raw image, containing multi-partitions
| qcow create qcow image
Options:
@@ -59,6 +60,12 @@ Options for loop image:
--compress-image=COMPRESS_IMAGE compress all loop images with 'gz' or 'bz2' or 'lzo'
--compress-disk-image=COMPRESS_DISK_IMAGE same with --compress-image
+Options for raw image:
+ --compress-image=COMPRESS_IMAGE compress all raw images with 'gz' or 'bz2'
+ --compress-disk-image=COMPRESS_DISK_IMAGE same with --compress-image
+ --generate-bmap=GENERATE_BMAP also generate the block map file
+ --fstab-entry=FSTAB_ENTRY Set fstab entry, 'name' means using device names, 'uuid' means using filesystem uuid
+
Examples:
| mic create loop tizen.ks
@@ -126,9 +133,9 @@ panic. This issue impact all openSUSE distributions: 12.1, 11.4, 11.3, etc
REPORTING BUGS
==============
-The source code is tracked in github.com:
+The source code is tracked in review.tizen.org:
- https://github.com/01org/mic
+ https://review.tizen.org/git/tools/mic
The bug is registered in tizen.org:
diff --git a/doc/usage.rst b/doc/usage.rst
index aed950b..1dda0a0 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -35,6 +35,12 @@ Image formulation support
* For a configuration with multiple partitions, which is specified in the kickstartfile, mic will generate multiple loop images
* And multiple loop images can be packed into a single archive file
+- Raw
+
+ * The “raw” format is essentially a dump of a hard disk
+ * It includes the partition table and all partitions
+ * The image is directly bootable
+
- fs
* “fs” means file-system
@@ -59,6 +65,7 @@ Create
auto auto detect image type from magic header
fs create fs image, which is also a chroot directory
loop create loop image, including multi-partitions
+ raw create raw image, containing multi-partitions
qcow create qcow image
- <ksfile>:
@@ -114,7 +121,7 @@ In Tizen, the released image will have a ks file along with image. For example,
::
- --compress-image=COMPRESS_IMAGE (for loop)
+ --compress-image=COMPRESS_IMAGE (for loop & raw)
Sets the disk image compression. Note: The available
values might depend on the used filesystem type.
--compress-disk-image=COMPRESS_IMAGE
@@ -123,6 +130,12 @@ In Tizen, the released image will have a ks file along with image. For example,
Whether to shrink loop images to minimal size
--include-src (for fs)
Generate a image with source rpms included
+ --generate-bmap (for raw)
+ Generate the block map file
+ --fstab-entry (for raw)
+ Set fstab entry, 'name' means using device names,
+ 'uuid' means using filesystem uuid
+
- Examples:
::
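To give a concrete feel for the new raw options, the sketch below shows roughly
what '--compress-image=gz' amounts to for a single raw image file. It uses only
the Python standard library rather than mic's own compressing() helper, and the
file name is just a placeholder:

::

    import gzip
    import shutil

    def compress_raw_image(raw_path):
        """Gzip-compress 'raw_path', producing '<raw_path>.gz' next to it."""
        gz_path = raw_path + ".gz"
        with open(raw_path, "rb") as f_in, gzip.open(gz_path, "wb") as f_out:
            # Stream the (potentially multi-GiB) image in 1 MiB chunks
            # instead of reading it into memory at once.
            shutil.copyfileobj(f_in, f_out, length=1024 * 1024)
        return gz_path

    # e.g. compress_raw_image("tizen-sda.raw") -> "tizen-sda.raw.gz"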
diff --git a/mic/imager/raw.py b/mic/imager/raw.py
new file mode 100755
index 0000000..649292b
--- /dev/null
+++ b/mic/imager/raw.py
@@ -0,0 +1,602 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import stat
+import shutil
+
+from mic import kickstart, msger
+from mic.utils import fs_related, runner, misc
+from mic.utils.partitionedfs import PartitionedMount
+from mic.utils.errors import CreatorError, MountError
+from mic.imager.baseimager import BaseImageCreator
+from mic.archive import packing, compressing
+
+class RawImageCreator(BaseImageCreator):
+ """Installs a system into a file containing a partitioned disk image.
+
+ RawImageCreator is an advanced ImageCreator subclass; a sparse file
+ is formatted with a partition table, each partition is loopback mounted
+ and the system is installed into a virtual disk. The disk image can
+ subsequently be booted in a virtual machine or accessed with kpartx.
+ """
+ img_format = 'raw'
+
+ def __init__(self, creatoropts=None, pkgmgr=None, compress_image=None, generate_bmap=None, fstab_entry="uuid"):
+ """Initialize a ApplianceImageCreator instance.
+
+ This method takes the same arguments as ImageCreator.__init__()
+ """
+ BaseImageCreator.__init__(self, creatoropts, pkgmgr)
+
+ self.__instloop = None
+ self.__imgdir = None
+ self.__disks = {}
+ self.__disk_format = "raw"
+ self._disk_names = []
+ self._ptable_format = self.ks.handler.bootloader.ptable
+ self.vmem = 512
+ self.vcpu = 1
+ self.checksum = False
+ self.use_uuid = fstab_entry == "uuid"
+ self.appliance_version = None
+ self.appliance_release = None
+ self.compress_image = compress_image
+ self.bmap_needed = generate_bmap
+ self._need_extlinux = not kickstart.use_installerfw(self.ks, "bootloader")
+ #self.getsource = False
+ #self.listpkg = False
+
+ self._dep_checks.extend(["sync", "kpartx", "parted"])
+ if self._need_extlinux:
+ self._dep_checks.extend(["extlinux"])
+
+ def configure(self, repodata = None):
+ import subprocess
+ def chroot():
+ os.chroot(self._instroot)
+ os.chdir("/")
+
+ if os.path.exists(self._instroot + "/usr/bin/Xorg"):
+ subprocess.call(["/bin/chmod", "u+s", "/usr/bin/Xorg"],
+ preexec_fn = chroot)
+
+ BaseImageCreator.configure(self, repodata)
+
+ def _get_fstab(self):
+ s = ""
+ for mp in self.__instloop.mount_order:
+ p = None
+ for p1 in self.__instloop.partitions:
+ if p1['mountpoint'] == mp:
+ p = p1
+ break
+
+ if self.use_uuid and p['uuid']:
+ device = "UUID=%s" % p['uuid']
+ else:
+ device = "/dev/%s%-d" % (p['disk_name'], p['num'])
+
+ s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
+ 'device': device,
+ 'mountpoint': p['mountpoint'],
+ 'fstype': p['fstype'],
+ 'fsopts': "defaults,noatime" if not p['fsopts'] else p['fsopts']}
+
+ if p['mountpoint'] == "/":
+ for subvol in self.__instloop.subvolumes:
+ if subvol['mountpoint'] == "/":
+ continue
+ s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
+ 'device': "/dev/%s%-d" % (p['disk_name'], p['num']),
+ 'mountpoint': subvol['mountpoint'],
+ 'fstype': p['fstype'],
+ 'fsopts': "defaults,noatime" if not subvol['fsopts'] else subvol['fsopts']}
+
+ s += "devpts /dev/pts devpts gid=5,mode=620 0 0\n"
+ s += "tmpfs /dev/shm tmpfs defaults 0 0\n"
+ s += "proc /proc proc defaults 0 0\n"
+ s += "sysfs /sys sysfs defaults 0 0\n"
+ return s
+
+ def _create_mkinitrd_config(self):
+ """write to tell which modules to be included in initrd"""
+
+ mkinitrd = ""
+ mkinitrd += "PROBE=\"no\"\n"
+ mkinitrd += "MODULES+=\"ext3 ata_piix sd_mod libata scsi_mod\"\n"
+ mkinitrd += "rootfs=\"ext3\"\n"
+ mkinitrd += "rootopts=\"defaults\"\n"
+
+ msger.debug("Writing mkinitrd config %s/etc/sysconfig/mkinitrd" \
+ % self._instroot)
+ os.makedirs(self._instroot + "/etc/sysconfig/", mode=0644)
+ cfg = open(self._instroot + "/etc/sysconfig/mkinitrd", "w")
+ cfg.write(mkinitrd)
+ cfg.close()
+
+ def _get_parts(self):
+ if not self.ks:
+ raise CreatorError("Failed to get partition info, "
+ "please check your kickstart setting.")
+
+ # Set a default partition if no partition is given out
+ if not self.ks.handler.partition.partitions:
+ partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
+ args = partstr.split()
+ pd = self.ks.handler.partition.parse(args[1:])
+ if pd not in self.ks.handler.partition.partitions:
+ self.ks.handler.partition.partitions.append(pd)
+
+ # partitions list from kickstart file
+ return kickstart.get_partitions(self.ks)
+
+ def get_disk_names(self):
+ """ Returns a list of physical target disk names (e.g., 'sdb') which
+ will be created. """
+
+ if self._disk_names:
+ return self._disk_names
+
+ #get partition info from ks handler
+ parts = self._get_parts()
+
+ for i in range(len(parts)):
+ if parts[i].disk:
+ disk_name = parts[i].disk
+ else:
+ raise CreatorError("Failed to create disks, no --ondisk "
+ "specified in partition line of ks file")
+
+ if parts[i].mountpoint and not parts[i].fstype:
+ raise CreatorError("Failed to create disks, no --fstype "
+ "specified for partition with mountpoint "
+ "'%s' in the ks file")
+
+ self._disk_names.append(disk_name)
+
+ return self._disk_names
+
+ def _full_name(self, name, extension):
+ """ Construct full file name for a file we generate. """
+ return "%s-%s.%s" % (self.name, name, extension)
+
+ def _full_path(self, path, name, extension):
+ """ Construct full file path to a file we generate. """
+ return os.path.join(path, self._full_name(name, extension))
+
+ #
+ # Actual implementation
+ #
+ def _mount_instroot(self, base_on = None):
+ parts = self._get_parts()
+ self.__instloop = PartitionedMount(self._instroot)
+
+ for p in parts:
+ self.__instloop.add_partition(int(p.size),
+ p.disk,
+ p.mountpoint,
+ p.fstype,
+ p.label,
+ fsopts = p.fsopts,
+ boot = p.active,
+ align = p.align,
+ part_type = p.part_type)
+
+ self.__instloop.layout_partitions(self._ptable_format)
+
+ # Create the disks
+ self.__imgdir = self._mkdtemp()
+ for disk_name, disk in self.__instloop.disks.items():
+ full_path = self._full_path(self.__imgdir, disk_name, "raw")
+ msger.debug("Adding disk %s as %s with size %s bytes" \
+ % (disk_name, full_path, disk['min_size']))
+
+ disk_obj = fs_related.SparseLoopbackDisk(full_path,
+ disk['min_size'])
+ self.__disks[disk_name] = disk_obj
+ self.__instloop.add_disk(disk_name, disk_obj)
+
+ self.__instloop.mount()
+ self._create_mkinitrd_config()
+
+ def mount(self, base_on = None, cachedir = None):
+ """
+ This method calls the base class' 'mount()' method and then creates
+ block device nodes corresponding to the image's partitions in the image
+ itself. Namely, the image has /dev/loopX device corresponding to the
+ entire image, and per-partition /dev/mapper/* devices.
+
+ We copy these files to the image's "/dev" directory in order to enable
+ scripts which run in the image chroot environment to access their own raw
+ partitions. For example, this can be used to install the bootloader to
+ the MBR (say, from an installer framework plugin).
+ """
+
+ def copy_devnode(src, dest):
+ """A helper function for copying device nodes."""
+
+ if not src:
+ return
+
+ stat_obj = os.stat(src)
+ assert stat.S_ISBLK(stat_obj.st_mode)
+
+ os.mknod(dest, stat_obj.st_mode,
+ os.makedev(os.major(stat_obj.st_rdev),
+ os.minor(stat_obj.st_rdev)))
+ # os.mknod uses the process umask and may create a node with different
+ # permissions, so we use os.chmod to make sure the permissions are
+ # correct.
+ os.chmod(dest, stat_obj.st_mode)
+
+ BaseImageCreator.mount(self, base_on, cachedir)
+
+ # Copy the disk loop devices
+ for name in self.__disks.keys():
+ loopdev = self.__disks[name].device
+ copy_devnode(loopdev, self._instroot + loopdev)
+
+ # Copy per-partition dm nodes
+ os.mkdir(self._instroot + "/dev/mapper", os.stat("/dev/mapper").st_mode)
+ for p in self.__instloop.partitions:
+ copy_devnode(p['mapper_device'],
+ self._instroot + p['mapper_device'])
+ copy_devnode(p['mpath_device'],
+ self._instroot + p['mpath_device'])
+
+ def unmount(self):
+ """
+ Remove loop/dm device nodes which we created in 'mount()' and call the
+ base class' 'unmount()' method.
+ """
+
+ for p in self.__instloop.partitions:
+ if p['mapper_device']:
+ path = self._instroot + p['mapper_device']
+ if os.path.exists(path):
+ os.unlink(path)
+ if p['mpath_device']:
+ path = self._instroot + p['mpath_device']
+ if os.path.exists(path):
+ os.unlink(path)
+
+ path = self._instroot + "/dev/mapper"
+ if os.path.exists(path):
+ shutil.rmtree(path, ignore_errors=True)
+
+ for name in self.__disks.keys():
+ if self.__disks[name].device:
+ path = self._instroot + self.__disks[name].device
+ if os.path.exists(path):
+ os.unlink(path)
+
+ BaseImageCreator.unmount(self)
+
+ def _get_required_packages(self):
+ required_packages = BaseImageCreator._get_required_packages(self)
+ if self._need_extlinux:
+ if not self.target_arch or not self.target_arch.startswith("arm"):
+ required_packages += ["syslinux", "syslinux-extlinux"]
+ return required_packages
+
+ def _get_excluded_packages(self):
+ return BaseImageCreator._get_excluded_packages(self)
+
+ def _get_syslinux_boot_config(self):
+ rootdev = None
+ root_part_uuid = None
+ for p in self.__instloop.partitions:
+ if p['mountpoint'] == "/":
+ rootdev = "/dev/%s%-d" % (p['disk_name'], p['num'])
+ root_part_uuid = p['partuuid']
+
+ return (rootdev, root_part_uuid)
+
+ def _create_syslinux_config(self):
+
+ splash = os.path.join(self._instroot, "boot/extlinux")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
+
+ (rootdev, root_part_uuid) = self._get_syslinux_boot_config()
+ options = self.ks.handler.bootloader.appendLine
+
+ #XXX don't hardcode default kernel - see livecd code
+ syslinux_conf = ""
+ syslinux_conf += "prompt 0\n"
+ syslinux_conf += "timeout 1\n"
+ syslinux_conf += "\n"
+ syslinux_conf += "default vesamenu.c32\n"
+ syslinux_conf += "menu autoboot Starting %s...\n" % self.distro_name
+ syslinux_conf += "menu hidden\n"
+ syslinux_conf += "\n"
+ syslinux_conf += "%s\n" % splashline
+ syslinux_conf += "menu title Welcome to %s!\n" % self.distro_name
+ syslinux_conf += "menu color border 0 #ffffffff #00000000\n"
+ syslinux_conf += "menu color sel 7 #ffffffff #ff000000\n"
+ syslinux_conf += "menu color title 0 #ffffffff #00000000\n"
+ syslinux_conf += "menu color tabmsg 0 #ffffffff #00000000\n"
+ syslinux_conf += "menu color unsel 0 #ffffffff #00000000\n"
+ syslinux_conf += "menu color hotsel 0 #ff000000 #ffffffff\n"
+ syslinux_conf += "menu color hotkey 7 #ffffffff #ff000000\n"
+ syslinux_conf += "menu color timeout_msg 0 #ffffffff #00000000\n"
+ syslinux_conf += "menu color timeout 0 #ffffffff #00000000\n"
+ syslinux_conf += "menu color cmdline 0 #ffffffff #00000000\n"
+
+ versions = []
+ kernels = self._get_kernel_versions()
+ symkern = "%s/boot/vmlinuz" % self._instroot
+
+ if os.path.lexists(symkern):
+ v = os.path.realpath(symkern).replace('%s-' % symkern, "")
+ syslinux_conf += "label %s\n" % self.distro_name.lower()
+ syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
+ syslinux_conf += "\tlinux ../vmlinuz\n"
+ if self._ptable_format == 'msdos':
+ rootstr = rootdev
+ else:
+ if not root_part_uuid:
+ raise MountError("Cannot find the root GPT partition UUID")
+ rootstr = "PARTUUID=%s" % root_part_uuid
+ syslinux_conf += "\tappend ro root=%s %s\n" % (rootstr, options)
+ syslinux_conf += "\tmenu default\n"
+ else:
+ for kernel in kernels:
+ for version in kernels[kernel]:
+ versions.append(version)
+
+ footlabel = 0
+ for v in versions:
+ syslinux_conf += "label %s%d\n" \
+ % (self.distro_name.lower(), footlabel)
+ syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
+ syslinux_conf += "\tlinux ../vmlinuz-%s\n" % v
+ syslinux_conf += "\tappend ro root=%s %s\n" \
+ % (rootdev, options)
+ if footlabel == 0:
+ syslinux_conf += "\tmenu default\n"
+ footlabel += 1
+
+ msger.debug("Writing syslinux config %s/boot/extlinux/extlinux.conf" \
+ % self._instroot)
+ cfg = open(self._instroot + "/boot/extlinux/extlinux.conf", "w")
+ cfg.write(syslinux_conf)
+ cfg.close()
+
+ def _install_syslinux(self):
+ for name in self.__disks.keys():
+ loopdev = self.__disks[name].device
+
+ # Set MBR
+ mbrfile = "%s/usr/share/syslinux/" % self._instroot
+ if self._ptable_format == 'gpt':
+ mbrfile += "gptmbr.bin"
+ else:
+ mbrfile += "mbr.bin"
+
+ msger.debug("Installing syslinux bootloader '%s' to %s" % \
+ (mbrfile, loopdev))
+
+ rc = runner.show(['dd', 'if=%s' % mbrfile, 'of=' + loopdev])
+ if rc != 0:
+ raise MountError("Unable to set MBR to %s" % loopdev)
+
+
+ # Ensure all data is flushed to disk before doing syslinux install
+ runner.quiet('sync')
+
+ fullpathsyslinux = fs_related.find_binary_path("extlinux")
+ rc = runner.show([fullpathsyslinux,
+ "-i",
+ "%s/boot/extlinux" % self._instroot])
+ if rc != 0:
+ raise MountError("Unable to install syslinux bootloader to %s" \
+ % loopdev)
+
+ def _create_bootconfig(self):
+ #If syslinux is available do the required configurations.
+ if self._need_extlinux \
+ and os.path.exists("%s/usr/share/syslinux/" % (self._instroot)) \
+ and os.path.exists("%s/boot/extlinux/" % (self._instroot)):
+ self._create_syslinux_config()
+ self._install_syslinux()
+
+ def _unmount_instroot(self):
+ if self.__instloop is not None:
+ try:
+ self.__instloop.cleanup()
+ except MountError, err:
+ msger.warning("%s" % err)
+
+ def _resparse(self, size = None):
+ return self.__instloop.resparse(size)
+
+ def _get_post_scripts_env(self, in_chroot):
+ env = BaseImageCreator._get_post_scripts_env(self, in_chroot)
+
+ # Export the file-system UUIDs and partition UUIDs (AKA PARTUUIDs)
+ for p in self.__instloop.partitions:
+ env.update(self._set_part_env(p['ks_pnum'], "UUID", p['uuid']))
+ env.update(self._set_part_env(p['ks_pnum'], "PARTUUID", p['partuuid']))
+ env.update(self._set_part_env(p['ks_pnum'], "DEVNODE_NOW",
+ p['mapper_device']))
+ env.update(self._set_part_env(p['ks_pnum'], "DISK_DEVNODE_NOW",
+ self.__disks[p['disk_name']].device))
+
+ return env
+
+ def _stage_final_image(self):
+ """Stage the final system image in _outdir.
+ write meta data
+ """
+ self._resparse()
+ self.image_files.update({'disks': self.__disks.keys()})
+
+ if not (self.compress_image or self.pack_to):
+ for imgfile in os.listdir(self.__imgdir):
+ if imgfile.endswith('.raw'):
+ for disk in self.__disks.keys():
+ if imgfile.find(disk) != -1:
+ self.image_files.setdefault(disk, {}).update(
+ {'image': imgfile})
+ self.image_files.setdefault('image_files',
+ []).append(imgfile)
+
+ if self.compress_image:
+ for imgfile in os.listdir(self.__imgdir):
+ if imgfile.endswith('.raw') or imgfile.endswith('bin'):
+ imgpath = os.path.join(self.__imgdir, imgfile)
+ msger.info("Compressing image %s" % imgfile)
+ compressing(imgpath, self.compress_image)
+ if imgfile.endswith('.raw') and not self.pack_to:
+ for disk in self.__disks.keys():
+ if imgfile.find(disk) != -1:
+ imgname = '%s.%s' % (imgfile, self.compress_image)
+ self.image_files.setdefault(disk, {}).update(
+ {'image': imgname})
+ self.image_files.setdefault('image_files',
+ []).append(imgname)
+
+ if self.pack_to:
+ dst = os.path.join(self._outdir, self.pack_to)
+ msger.info("Pack all raw images to %s" % dst)
+ packing(dst, self.__imgdir)
+ self.image_files.update({'image_files': self.pack_to})
+ else:
+ msger.debug("moving disks to stage location")
+ for imgfile in os.listdir(self.__imgdir):
+ src = os.path.join(self.__imgdir, imgfile)
+ dst = os.path.join(self._outdir, imgfile)
+ msger.debug("moving %s to %s" % (src,dst))
+ shutil.move(src,dst)
+
+ self._write_image_xml()
+
+ def _write_image_xml(self):
+ imgarch = "i686"
+ if self.target_arch and self.target_arch.startswith("arm"):
+ imgarch = "arm"
+ xml = "<image>\n"
+
+ name_attributes = ""
+ if self.appliance_version:
+ name_attributes += " version='%s'" % self.appliance_version
+ if self.appliance_release:
+ name_attributes += " release='%s'" % self.appliance_release
+ xml += " <name%s>%s</name>\n" % (name_attributes, self.name)
+ xml += " <domain>\n"
+ # XXX don't hardcode - determine based on the kernel we installed for
+ # grub baremetal vs xen
+ xml += " <boot type='hvm'>\n"
+ xml += " <guest>\n"
+ xml += " <arch>%s</arch>\n" % imgarch
+ xml += " </guest>\n"
+ xml += " <os>\n"
+ xml += " <loader dev='hd'/>\n"
+ xml += " </os>\n"
+
+ i = 0
+ for name in self.__disks.keys():
+ full_name = self._full_name(name, self.__disk_format)
+ xml += " <drive disk='%s' target='hd%s'/>\n" \
+ % (full_name, chr(ord('a') + i))
+ i = i + 1
+
+ xml += " </boot>\n"
+ xml += " <devices>\n"
+ xml += " <vcpu>%s</vcpu>\n" % self.vcpu
+ xml += " <memory>%d</memory>\n" %(self.vmem * 1024)
+ for network in self.ks.handler.network.network:
+ xml += " <interface/>\n"
+ xml += " <graphics/>\n"
+ xml += " </devices>\n"
+ xml += " </domain>\n"
+ xml += " <storage>\n"
+
+ if self.checksum is True:
+ for name in self.__disks.keys():
+ diskpath = self._full_path(self._outdir, name, \
+ self.__disk_format)
+ full_name = self._full_name(name, self.__disk_format)
+
+ msger.debug("Generating disk signature for %s" % full_name)
+
+ xml += " <disk file='%s' use='system' format='%s'>\n" \
+ % (full_name, self.__disk_format)
+
+ hashes = misc.calc_hashes(diskpath, ('sha1', 'sha256'))
+
+ xml += " <checksum type='sha1'>%s</checksum>\n" \
+ % hashes[0]
+ xml += " <checksum type='sha256'>%s</checksum>\n" \
+ % hashes[1]
+ xml += " </disk>\n"
+ else:
+ for name in self.__disks.keys():
+ full_name = self._full_name(name, self.__disk_format)
+ xml += " <disk file='%s' use='system' format='%s'/>\n" \
+ % (full_name, self.__disk_format)
+
+ xml += " </storage>\n"
+ xml += "</image>\n"
+
+ msger.debug("writing image XML to %s/%s.xml" %(self._outdir, self.name))
+ cfg = open("%s/%s.xml" % (self._outdir, self.name), "w")
+ cfg.write(xml)
+ cfg.close()
+
+ def generate_bmap(self):
+ """ Generate block map file for the image. The idea is that while disk
+ images we generate may be large (e.g., 4GiB), they may actually contain
+ only a little real data, e.g., 512MiB. This data consists of files, directories,
+ file-system meta-data, the partition table, etc. In other words, when
+ flashing the image to the target device, you do not have to copy all the
+ 4GiB of data, you can copy only 512MiB of it, which is 4 times faster.
+
+ This function generates the block map file for an arbitrary image that
+ mic has generated. The block map file is basically an XML file which
+ contains a list of blocks which have to be copied to the target device.
+ The other blocks are not used and there is no need to copy them. """
+
+ if self.bmap_needed is None:
+ return
+
+ from mic.utils import BmapCreate
+ msger.info("Generating the map file(s)")
+
+ for name in self.__disks.keys():
+ image = self._full_path(self.__imgdir, name, self.__disk_format)
+ bmap_file = self._full_path(self._outdir, name, "bmap")
+ self.image_files.setdefault(name, {}).update({'bmap': \
+ os.path.basename(bmap_file)})
+
+ msger.debug("Generating block map file '%s'" % bmap_file)
+
+ try:
+ creator = BmapCreate.BmapCreate(image, bmap_file)
+ creator.generate()
+ del creator
+ except BmapCreate.Error as err:
+ raise CreatorError("Failed to create bmap file: %s" % str(err))
+
+ def create_manifest(self):
+ if self.compress_image:
+ self.image_files.update({'compress': self.compress_image})
+ super(RawImageCreator, self).create_manifest()
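As a standalone illustration of the fstab logic in _get_fstab() above, the
sketch below shows how the --fstab-entry choice selects between a UUID= device
reference and a plain device name; the partition dict keys mirror the ones the
imager uses, and the sample values are made up:

::

    def fstab_line(part, use_uuid=True):
        """Build one /etc/fstab line for a partition dict ('uuid',
        'disk_name', 'num', 'mountpoint', 'fstype', 'fsopts')."""
        if use_uuid and part['uuid']:
            device = "UUID=%s" % part['uuid']                         # --fstab-entry=uuid
        else:
            device = "/dev/%s%d" % (part['disk_name'], part['num'])  # --fstab-entry=name
        fsopts = part['fsopts'] or "defaults,noatime"
        return "%s %s %s %s 0 0" % (device, part['mountpoint'],
                                    part['fstype'], fsopts)

    # fstab_line({'uuid': '1234-ABCD', 'disk_name': 'sda', 'num': 1,
    #             'mountpoint': '/', 'fstype': 'ext4', 'fsopts': None})
    # -> 'UUID=1234-ABCD / ext4 defaults,noatime 0 0'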
diff --git a/mic/utils/BmapCreate.py b/mic/utils/BmapCreate.py
new file mode 100755
index 0000000..6934f1a
--- /dev/null
+++ b/mic/utils/BmapCreate.py
@@ -0,0 +1,354 @@
+# Copyright (c) 2012-2013 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+"""
+This module implements the block map (bmap) creation functionality and provides
+the corresponding API in form of the 'BmapCreate' class.
+
+The idea is that while image files may generally be very large (e.g., 4GiB),
+they may nevertheless contain only a little real data, e.g., 512MiB. This data
+consists of files, directories, file-system meta-data, the partition table, etc. When
+copying the image to the target device, you do not have to copy all the 4GiB of
+data, you can copy only 512MiB of it, which is 4 times less, so copying should
+presumably be 4 times faster.
+
+The block map file is an XML file which contains a list of blocks which have to
+be copied to the target device. The other blocks are not used and there is no
+need to copy them. The XML file also contains some additional information like
+block size, image size, count of mapped blocks, etc. There are also many
+commentaries, so it is human-readable.
+
+The image has to be a sparse file. Generally, this means that when you generate
+this image file, you should start with a huge sparse file which contains a
+single hole spanning the entire file. Then you should partition it, write all
+the data (probably by means of loop-back mounting the image or parts of it),
+etc. The end result should be a sparse file where mapped areas represent useful
+parts of the image and holes represent useless parts of the image, which do not
+have to be copied when copying the image to the target device.
+
+This module relies on the 'Filemap' module, which uses the FIEMAP ioctl or SEEK_HOLE/SEEK_DATA to detect holes.
+"""
+
+# Disable the following pylint recommendations:
+# * Too many instance attributes - R0902
+# * Too few public methods - R0903
+# pylint: disable=R0902,R0903
+
+import hashlib
+import logging
+from mic.utils.misc import human_size
+from mic.utils import Filemap
+
+# The bmap format version we generate.
+#
+# Changelog:
+# o 1.3 -> 2.0:
+# Support SHA256 and SHA512 checksums, in 1.3 only SHA1 was supported.
+# "BmapFileChecksum" is used instead of "BmapFileSHA1", and "chksum="
+# attribute is used instead "sha1=". Introduced "ChecksumType" tag. This is
+# an incompatible change.
+# Note, bmap format 1.4 is identical to 2.0. Version 1.4 was a mistake,
+# instead of incrementing the major version number, we incremented minor
+# version number. Unfortunately, the mistake slipped into bmap-tools version
+# 3.0, and was only fixed in bmap-tools v3.1.
+SUPPORTED_BMAP_VERSION = "2.0"
+
+_BMAP_START_TEMPLATE = \
+"""<?xml version="1.0" ?>
+<!-- This file contains the block map for an image file, which is basically
+ a list of useful (mapped) block numbers in the image file. In other words,
+ it lists only those blocks which contain data (boot sector, partition
+ table, file-system metadata, files, directories, extents, etc). These
+ blocks have to be copied to the target device. The other blocks do not
+ contain any useful data and do not have to be copied to the target
+ device.
+
+ The block map an optimization which allows to copy or flash the image to
+ the image quicker than copying of flashing the entire image. This is
+ because with bmap less data is copied: <MappedBlocksCount> blocks instead
+ of <BlocksCount> blocks.
+
+ Besides the machine-readable data, this file contains useful commentaries
+ which contain human-readable information like image size, percentage of
+ mapped data, etc.
+
+ The 'version' attribute is the block map file format version in the
+ 'major.minor' format. The version major number is increased whenever an
+ incompatible block map format change is made. The minor number changes
+ in case of minor backward-compatible changes. -->
+
+<bmap version="%s">
+ <!-- Image size in bytes: %s -->
+ <ImageSize> %u </ImageSize>
+
+ <!-- Size of a block in bytes -->
+ <BlockSize> %u </BlockSize>
+
+ <!-- Count of blocks in the image file -->
+ <BlocksCount> %u </BlocksCount>
+
+"""
+
+class Error(Exception):
+ """
+ A class for exceptions generated by this module. We currently support only
+ one type of exception, and we basically throw a human-readable problem
+ description in case of errors.
+ """
+ pass
+
+class BmapCreate(object):
+ """
+ This class implements the bmap creation functionality. To generate a bmap
+ for an image (which is supposedly a sparse file), you should first create
+ an instance of 'BmapCreate' and provide:
+
+ * full path or a file-like object of the image to create bmap for
+ * full path or a file object to use for writing the results to
+
+ Then you should invoke the 'generate()' method of this class. It will use
+ the FIEMAP ioctl to generate the bmap.
+ """
+
+ def __init__(self, image, bmap, chksum_type="sha256", log=None):
+ """
+ Initialize a class instance:
+ * image - full path or a file-like object of the image to create bmap
+ for
+ * bmap - full path or a file object to use for writing the resulting
+ bmap to
+ * chksum_type - type of the checksum to use in the bmap file (all checksum
+ types which python's "hashlib" module supports are allowed).
+ * log - the logger object to use for printing messages.
+ """
+
+ self._log = log
+ if self._log is None:
+ self._log = logging.getLogger(__name__)
+
+ self.image_size = None
+ self.image_size_human = None
+ self.block_size = None
+ self.blocks_cnt = None
+ self.mapped_cnt = None
+ self.mapped_size = None
+ self.mapped_size_human = None
+ self.mapped_percent = None
+
+ self._mapped_count_pos1 = None
+ self._mapped_count_pos2 = None
+ self._chksum_pos = None
+
+ self._f_image_needs_close = False
+ self._f_bmap_needs_close = False
+
+ self._cs_type = chksum_type.lower()
+ try:
+ self._cs_len = len(hashlib.new(self._cs_type).hexdigest())
+ except ValueError as err:
+ raise Error("cannot initialize hash function \"%s\": %s" %
+ (self._cs_type, err))
+
+ if hasattr(image, "read"):
+ self._f_image = image
+ self._image_path = image.name
+ else:
+ self._image_path = image
+ self._open_image_file()
+
+ if hasattr(bmap, "read"):
+ self._f_bmap = bmap
+ self._bmap_path = bmap.name
+ else:
+ self._bmap_path = bmap
+ self._open_bmap_file()
+
+ try:
+ self.filemap = Filemap.filemap(self._f_image, self._log)
+ except (Filemap.Error, Filemap.ErrorNotSupp) as err:
+ raise Error("cannot generate bmap: %s" % err)
+
+ self.image_size = self.filemap.image_size
+ self.image_size_human = human_size(self.image_size)
+ if self.image_size == 0:
+ raise Error("cannot generate bmap for zero-sized image file '%s'"
+ % self._image_path)
+
+ self.block_size = self.filemap.block_size
+ self.blocks_cnt = self.filemap.blocks_cnt
+
+ def __del__(self):
+ """The class destructor which closes the opened files."""
+ if self._f_image_needs_close:
+ self._f_image.close()
+ if self._f_bmap_needs_close:
+ self._f_bmap.close()
+
+ def _open_image_file(self):
+ """Open the image file."""
+ try:
+ self._f_image = open(self._image_path, 'rb')
+ except IOError as err:
+ raise Error("cannot open image file '%s': %s"
+ % (self._image_path, err))
+
+ self._f_image_needs_close = True
+
+ def _open_bmap_file(self):
+ """Open the bmap file."""
+ try:
+ self._f_bmap = open(self._bmap_path, 'w+')
+ except IOError as err:
+ raise Error("cannot open bmap file '%s': %s"
+ % (self._bmap_path, err))
+
+ self._f_bmap_needs_close = True
+
+ def _bmap_file_start(self):
+ """
+ A helper function which generates the starting contents of the block
+ map file: the header comment, image size, block size, etc.
+ """
+
+ # We do not know the amount of mapped blocks at the moment, so just put
+ # whitespaces instead of real numbers. Assume the longest possible
+ # numbers.
+
+ xml = _BMAP_START_TEMPLATE \
+ % (SUPPORTED_BMAP_VERSION, self.image_size_human,
+ self.image_size, self.block_size, self.blocks_cnt)
+ xml += " <!-- Count of mapped blocks: "
+
+ self._f_bmap.write(xml)
+ self._mapped_count_pos1 = self._f_bmap.tell()
+
+ xml = "%s or %s -->\n" % (' ' * len(self.image_size_human),
+ ' ' * len("100.0%"))
+ xml += " <MappedBlocksCount> "
+
+ self._f_bmap.write(xml)
+ self._mapped_count_pos2 = self._f_bmap.tell()
+
+ xml = "%s </MappedBlocksCount>\n\n" % (' ' * len(str(self.blocks_cnt)))
+
+ # pylint: disable=C0301
+ xml += " <!-- Type of checksum used in this file -->\n"
+ xml += " <ChecksumType> %s </ChecksumType>\n\n" % self._cs_type
+
+ xml += " <!-- The checksum of this bmap file. When it is calculated, the value of\n"
+ xml += " the checksum has be zero (all ASCII \"0\" symbols). -->\n"
+ xml += " <BmapFileChecksum> "
+
+ self._f_bmap.write(xml)
+ self._chksum_pos = self._f_bmap.tell()
+
+ xml = "0" * self._cs_len + " </BmapFileChecksum>\n\n"
+ xml += " <!-- The block map which consists of elements which may either be a\n"
+ xml += " range of blocks or a single block. The 'chksum' attribute\n"
+ xml += " (if present) is the checksum of this blocks range. -->\n"
+ xml += " <BlockMap>\n"
+ # pylint: enable=C0301
+
+ self._f_bmap.write(xml)
+
+ def _bmap_file_end(self):
+ """
+ A helper function which generates the final parts of the block map
+ file: the ending tags and the information about the amount of mapped
+ blocks.
+ """
+
+ xml = " </BlockMap>\n"
+ xml += "</bmap>\n"
+
+ self._f_bmap.write(xml)
+
+ self._f_bmap.seek(self._mapped_count_pos1)
+ self._f_bmap.write("%s or %.1f%%"
+ % (self.mapped_size_human, self.mapped_percent))
+
+ self._f_bmap.seek(self._mapped_count_pos2)
+ self._f_bmap.write("%u" % self.mapped_cnt)
+
+ self._f_bmap.seek(0)
+ hash_obj = hashlib.new(self._cs_type)
+ hash_obj.update(self._f_bmap.read())
+ chksum = hash_obj.hexdigest()
+ self._f_bmap.seek(self._chksum_pos)
+ self._f_bmap.write("%s" % chksum)
+
+ def _calculate_chksum(self, first, last):
+ """
+ A helper function which calculates checksum for the range of blocks of
+ the image file: from block 'first' to block 'last'.
+ """
+
+ start = first * self.block_size
+ end = (last + 1) * self.block_size
+
+ self._f_image.seek(start)
+ hash_obj = hashlib.new(self._cs_type)
+
+ chunk_size = 1024*1024
+ to_read = end - start
+ read = 0
+
+ while read < to_read:
+ if read + chunk_size > to_read:
+ chunk_size = to_read - read
+ chunk = self._f_image.read(chunk_size)
+ hash_obj.update(chunk)
+ read += chunk_size
+
+ return hash_obj.hexdigest()
+
+ def generate(self, include_checksums=True):
+ """
+ Generate bmap for the image file. If 'include_checksums' is 'True',
+ also generate checksums for block ranges.
+ """
+
+ # Save image file position in order to restore it at the end
+ image_pos = self._f_image.tell()
+
+ self._bmap_file_start()
+
+ # Generate the block map and write it to the XML block map
+ # file as we go.
+ self.mapped_cnt = 0
+ for first, last in self.filemap.get_mapped_ranges(0, self.blocks_cnt):
+ self.mapped_cnt += last - first + 1
+ if include_checksums:
+ chksum = self._calculate_chksum(first, last)
+ chksum = " chksum=\"%s\"" % chksum
+ else:
+ chksum = ""
+
+ if first != last:
+ self._f_bmap.write(" <Range%s> %s-%s </Range>\n"
+ % (chksum, first, last))
+ else:
+ self._f_bmap.write(" <Range%s> %s </Range>\n"
+ % (chksum, first))
+
+ self.mapped_size = self.mapped_cnt * self.block_size
+ self.mapped_size_human = human_size(self.mapped_size)
+ self.mapped_percent = (self.mapped_cnt * 100.0) / self.blocks_cnt
+
+ self._bmap_file_end()
+
+ try:
+ self._f_bmap.flush()
+ except IOError as err:
+ raise Error("cannot flush the bmap file '%s': %s"
+ % (self._bmap_path, err))
+
+ self._f_image.seek(image_pos)
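A minimal usage sketch of the BmapCreate class above, mirroring how
RawImageCreator.generate_bmap() drives it; the file names are placeholders:

::

    from mic.utils import BmapCreate

    image_path = "tizen-sda.raw"    # sparse raw image produced by the imager
    bmap_path = "tizen-sda.bmap"    # block map file to generate

    try:
        creator = BmapCreate.BmapCreate(image_path, bmap_path)
        creator.generate()          # writes the XML bmap, including checksums
    except BmapCreate.Error as err:
        print("bmap generation failed: %s" % err)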
diff --git a/mic/utils/Filemap.py b/mic/utils/Filemap.py
new file mode 100755
index 0000000..81d16c1
--- /dev/null
+++ b/mic/utils/Filemap.py
@@ -0,0 +1,520 @@
+# Copyright (c) 2012 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+"""
+This module implements a way to get file block mapping information. Two methods
+are supported - the FIEMAP ioctl and the 'SEEK_HOLE / SEEK_DATA' features of
+the file seek syscall. The former is implemented by the 'FilemapFiemap' class,
+the latter is implemented by the 'FilemapSeek' class. Both classes provide the
+same API. The 'filemap' function automatically selects which class can be used
+and returns an instance of the class.
+"""
+
+# Disable the following pylint recommendations:
+# * Too many instance attributes (R0902)
+# pylint: disable=R0902
+
+import os
+import struct
+import array
+import fcntl
+import tempfile
+import logging
+from mic.utils.misc import get_block_size
+
+
+class ErrorNotSupp(Exception):
+ """
+ An exception of this type is raised when the 'FIEMAP' or 'SEEK_HOLE' feature
+ is not supported either by the kernel or the file-system.
+ """
+ pass
+
+class Error(Exception):
+ """A class for all the other exceptions raised by this module."""
+ pass
+
+
+class _FilemapBase(object):
+ """
+ This is a base class for a couple of other classes in this module. This
+ class simply performs the common parts of the initialization process: opens
+ the image file, gets its size, etc. The 'log' parameter is the logger object
+ to use for printing messages.
+ """
+
+ def __init__(self, image, log=None):
+ """
+ Initialize a class instance. The 'image' argument is full path to the
+ file or file object to operate on.
+ """
+
+ self._log = log
+ if self._log is None:
+ self._log = logging.getLogger(__name__)
+
+ self._f_image_needs_close = False
+
+ if hasattr(image, "fileno"):
+ self._f_image = image
+ self._image_path = image.name
+ else:
+ self._image_path = image
+ self._open_image_file()
+
+ try:
+ self.image_size = os.fstat(self._f_image.fileno()).st_size
+ except IOError as err:
+ raise Error("cannot get information about file '%s': %s"
+ % (self._f_image.name, err))
+
+ try:
+ self.block_size = get_block_size(self._f_image)
+ except IOError as err:
+ raise Error("cannot get block size for '%s': %s"
+ % (self._image_path, err))
+
+ self.blocks_cnt = self.image_size + self.block_size - 1
+ self.blocks_cnt /= self.block_size
+
+ try:
+ self._f_image.flush()
+ except IOError as err:
+ raise Error("cannot flush image file '%s': %s"
+ % (self._image_path, err))
+
+ try:
+ os.fsync(self._f_image.fileno())
+ except OSError as err:
+ raise Error("cannot synchronize image file '%s': %s "
+ % (self._image_path, err.strerror))
+
+ self._log.debug("opened image \"%s\"" % self._image_path)
+ self._log.debug("block size %d, blocks count %d, image size %d"
+ % (self.block_size, self.blocks_cnt, self.image_size))
+
+ def __del__(self):
+ """The class destructor which just closes the image file."""
+ if self._f_image_needs_close:
+ self._f_image.close()
+
+ def _open_image_file(self):
+ """Open the image file."""
+ try:
+ self._f_image = open(self._image_path, 'rb')
+ except IOError as err:
+ raise Error("cannot open image file '%s': %s"
+ % (self._image_path, err))
+
+ self._f_image_needs_close = True
+
+ def block_is_mapped(self, block): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. It returns
+ 'True' if block number 'block' of the image file is mapped and 'False'
+ otherwise.
+ """
+
+ raise Error("the method is not implemented")
+
+ def block_is_unmapped(self, block): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. It returns
+ 'True' if block number 'block' of the image file is not mapped (hole)
+ and 'False' otherwise.
+ """
+
+ raise Error("the method is not implemented")
+
+ def get_mapped_ranges(self, start, count): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. This is a
+ generator which yields ranges of mapped blocks in the file. The ranges
+ are tuples of 2 elements: [first, last], where 'first' is the first
+ mapped block and 'last' is the last mapped block.
+
+ The ranges are yielded for the area of the file of size 'count' blocks,
+ starting from block 'start'.
+ """
+
+ raise Error("the method is not implemented")
+
+ def get_unmapped_ranges(self, start, count): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. Just like
+ 'get_mapped_ranges()', but yields unmapped block ranges instead
+ (holes).
+ """
+
+ raise Error("the method is not implemented")
+
+
+# The 'SEEK_HOLE' and 'SEEK_DATA' options of the file seek system call
+_SEEK_DATA = 3
+_SEEK_HOLE = 4
+
+def _lseek(file_obj, offset, whence):
+ """This is a helper function which invokes 'os.lseek' for file object
+ 'file_obj' and with specified 'offset' and 'whence'. The 'whence'
+ argument is supposed to be either '_SEEK_DATA' or '_SEEK_HOLE'. When
+ there is no more data or hole starting from 'offset', this function
+ returns '-1'. Otherwise the data or hole position is returned."""
+
+ try:
+ return os.lseek(file_obj.fileno(), offset, whence)
+ except OSError as err:
+ # The 'lseek' system call returns the ENXIO if there is no data or
+ # hole starting from the specified offset.
+ if err.errno == os.errno.ENXIO:
+ return -1
+ elif err.errno == os.errno.EINVAL:
+ raise ErrorNotSupp("the kernel or file-system does not support "
+ "\"SEEK_HOLE\" and \"SEEK_DATA\"")
+ else:
+ raise
+
+class FilemapSeek(_FilemapBase):
+ """
+ This class uses the 'SEEK_HOLE' and 'SEEK_DATA' to find file block mapping.
+ Unfortunately, the current implementation requires the caller to have write
+ access to the image file.
+ """
+
+ def __init__(self, image, log=None):
+ """Refer the '_FilemapBase' class for the documentation."""
+
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapSeek: initializing")
+
+ self._probe_seek_hole()
+
+ def _probe_seek_hole(self):
+ """
+ Check whether the system implements 'SEEK_HOLE' and 'SEEK_DATA'.
+ Unfortunately, there seems to be no clean way for detecting this,
+ because often the system just fakes them by just assuming that all
+ files are fully mapped, so 'SEEK_HOLE' always returns EOF and
+ 'SEEK_DATA' always returns the requested offset.
+
+ I could not invent a better way of detecting the fake 'SEEK_HOLE'
+ implementation than just to create a temporary file in the same
+ directory where the image file resides. It would be nice to change this
+ to something better.
+ """
+
+ directory = os.path.dirname(self._image_path)
+
+ try:
+ tmp_obj = tempfile.TemporaryFile("w+", dir=directory)
+ except IOError as err:
+ raise ErrorNotSupp("cannot create a temporary in \"%s\": %s"
+ % (directory, err))
+
+ try:
+ os.ftruncate(tmp_obj.fileno(), self.block_size)
+ except OSError as err:
+ raise ErrorNotSupp("cannot truncate temporary file in \"%s\": %s"
+ % (directory, err))
+
+ offs = _lseek(tmp_obj, 0, _SEEK_HOLE)
+ if offs != 0:
+ # We are dealing with the stub 'SEEK_HOLE' implementation which
+ # always returns EOF.
+ self._log.debug("lseek(0, SEEK_HOLE) returned %d" % offs)
+ raise ErrorNotSupp("the file-system does not support "
+ "\"SEEK_HOLE\" and \"SEEK_DATA\" but only "
+ "provides a stub implementation")
+
+ tmp_obj.close()
+
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ offs = _lseek(self._f_image, block * self.block_size, _SEEK_DATA)
+ if offs == -1:
+ result = False
+ else:
+ result = (offs / self.block_size == block)
+
+ self._log.debug("FilemapSeek: block_is_mapped(%d) returns %s"
+ % (block, result))
+ return result
+
+ def block_is_unmapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return not self.block_is_mapped(block)
+
+ def _get_ranges(self, start, count, whence1, whence2):
+ """
+ This function implements 'get_mapped_ranges()' and
+ 'get_unmapped_ranges()' depending on what is passed in the 'whence1'
+ and 'whence2' arguments.
+ """
+
+ assert whence1 != whence2
+ end = start * self.block_size
+ limit = end + count * self.block_size
+
+ while True:
+ start = _lseek(self._f_image, end, whence1)
+ if start == -1 or start >= limit or start == self.image_size:
+ break
+
+ end = _lseek(self._f_image, start, whence2)
+ if end == -1 or end == self.image_size:
+ end = self.blocks_cnt * self.block_size
+ if end > limit:
+ end = limit
+
+ start_blk = start / self.block_size
+ end_blk = end / self.block_size - 1
+ self._log.debug("FilemapSeek: yielding range (%d, %d)"
+ % (start_blk, end_blk))
+ yield (start_blk, end_blk)
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapSeek: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ return self._get_ranges(start, count, _SEEK_DATA, _SEEK_HOLE)
+
+ def get_unmapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapSeek: get_unmapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ return self._get_ranges(start, count, _SEEK_HOLE, _SEEK_DATA)
+
+
+# Below goes the FIEMAP ioctl implementation, which is not very readable
+# because it deals with the rather complex FIEMAP ioctl. To understand the
+# code, you need to know the FIEMAP interface, which is documented in the
+# "Documentation/filesystems/fiemap.txt" file in the Linux kernel sources.
+
+# Format string for 'struct fiemap'
+_FIEMAP_FORMAT = "=QQLLLL"
+# sizeof(struct fiemap)
+_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
+# Format string for 'struct fiemap_extent'
+_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
+# sizeof(struct fiemap_extent)
+_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
+# The FIEMAP ioctl number
+_FIEMAP_IOCTL = 0xC020660B
+# This FIEMAP ioctl flag which instructs the kernel to sync the file before
+# reading the block map
+_FIEMAP_FLAG_SYNC = 0x00000001
+# Size of the buffer for 'struct fiemap_extent' elements which will be used
+# when invoking the FIEMAP ioctl. The larger is the buffer, the less times the
+# FIEMAP ioctl will be invoked.
+_FIEMAP_BUFFER_SIZE = 256 * 1024
+
+class FilemapFiemap(_FilemapBase):
+ """
+ This class provides an API to the FIEMAP ioctl. Namely, it allows iterating
+ over all mapped blocks and over all holes.
+
+ This class synchronizes the image file every time it invokes the FIEMAP
+ ioctl in order to work-around early FIEMAP implementation kernel bugs.
+ """
+
+ def __init__(self, image, log=None):
+ """
+ Initialize a class instance. The 'image' argument is the full path or a
+ file object to operate on.
+ """
+
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapFiemap: initializing")
+
+ self._buf_size = _FIEMAP_BUFFER_SIZE
+
+ # Calculate how many 'struct fiemap_extent' elements fit the buffer
+ self._buf_size -= _FIEMAP_SIZE
+ self._fiemap_extent_cnt = self._buf_size / _FIEMAP_EXTENT_SIZE
+ assert self._fiemap_extent_cnt > 0
+ self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
+ self._buf_size += _FIEMAP_SIZE
+
+ # Allocate a mutable buffer for the FIEMAP ioctl
+ self._buf = array.array('B', [0] * self._buf_size)
+
+ # Check if the FIEMAP ioctl is supported
+ self.block_is_mapped(0)
+
+ def _invoke_fiemap(self, block, count):
+ """
+ Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
+ block number 'block'.
+
+ The full result of the operation is stored in 'self._buf' on exit.
+ Returns the unpacked 'struct fiemap' data structure in form of a python
+ list (just like 'struct.unpack()').
+ """
+
+ if self.blocks_cnt != 0 and (block < 0 or block >= self.blocks_cnt):
+ raise Error("bad block number %d, should be within [0, %d]"
+ % (block, self.blocks_cnt))
+
+ # Initialize the 'struct fiemap' part of the buffer. We use the
+ # '_FIEMAP_FLAG_SYNC' flag in order to make sure the file is
+ # synchronized. The reason for this is that early FIEMAP
+ # implementations had many bugs related to cached dirty data, and
+ # synchronizing the file is a necessary work-around.
+ struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
+ count * self.block_size, _FIEMAP_FLAG_SYNC, 0,
+ self._fiemap_extent_cnt, 0)
+
+ try:
+ fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
+ except IOError as err:
+ # Note, the FIEMAP ioctl is supported by the Linux kernel starting
+ # from version 2.6.28 (year 2008).
+ if err.errno == os.errno.EOPNOTSUPP:
+ errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
+ "by the file-system"
+ self._log.debug(errstr)
+ raise ErrorNotSupp(errstr)
+ if err.errno == os.errno.ENOTTY:
+ errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
+ "by the kernel"
+ self._log.debug(errstr)
+ raise ErrorNotSupp(errstr)
+ raise Error("the FIEMAP ioctl failed for '%s': %s"
+ % (self._image_path, err))
+
+ return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])
+
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ struct_fiemap = self._invoke_fiemap(block, 1)
+
+ # The 3rd element of 'struct_fiemap' is the 'fm_mapped_extents' field.
+ # If it contains zero, the block is not mapped, otherwise it is
+ # mapped.
+ result = bool(struct_fiemap[3])
+ self._log.debug("FilemapFiemap: block_is_mapped(%d) returns %s"
+ % (block, result))
+ return result
+
+ def block_is_unmapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return not self.block_is_mapped(block)
+
+ def _unpack_fiemap_extent(self, index):
+ """
+ Unpack a 'struct fiemap_extent' structure object number 'index' from
+ the internal 'self._buf' buffer.
+ """
+
+ offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
+ return struct.unpack(_FIEMAP_EXTENT_FORMAT,
+ self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])
+
+ def _do_get_mapped_ranges(self, start, count):
+ """
+ Implements most of the functionality for the 'get_mapped_ranges()'
+ generator: invokes the FIEMAP ioctl, walks through the mapped extents
+ and yields mapped block ranges. However, the ranges may be consecutive
+ (e.g., (1, 100), (100, 200)) and 'get_mapped_ranges()' simply merges
+ them.
+ """
+
+ block = start
+ while block < start + count:
+ struct_fiemap = self._invoke_fiemap(block, count)
+
+ mapped_extents = struct_fiemap[3]
+ if mapped_extents == 0:
+ # No more mapped blocks
+ return
+
+ extent = 0
+ while extent < mapped_extents:
+ fiemap_extent = self._unpack_fiemap_extent(extent)
+
+ # Start of the extent
+ extent_start = fiemap_extent[0]
+ # Starting block number of the extent
+ extent_block = extent_start / self.block_size
+ # Length of the extent
+ extent_len = fiemap_extent[2]
+ # Count of blocks in the extent
+ extent_count = extent_len / self.block_size
+
+ # Extent length and offset have to be block-aligned
+ assert extent_start % self.block_size == 0
+ assert extent_len % self.block_size == 0
+
+ if extent_block > start + count - 1:
+ return
+
+ first = max(extent_block, block)
+ last = min(extent_block + extent_count, start + count) - 1
+ yield (first, last)
+
+ extent += 1
+
+ block = extent_block + extent_count
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapFiemap: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ iterator = self._do_get_mapped_ranges(start, count)
+ first_prev, last_prev = iterator.next()
+
+ for first, last in iterator:
+ if last_prev == first - 1:
+ last_prev = last
+ else:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (first_prev, last_prev))
+ yield (first_prev, last_prev)
+ first_prev, last_prev = first, last
+
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (first_prev, last_prev))
+ yield (first_prev, last_prev)
+
+ def get_unmapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapFiemap: get_unmapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ hole_first = start
+ for first, last in self._do_get_mapped_ranges(start, count):
+ if first > hole_first:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (hole_first, first - 1))
+ yield (hole_first, first - 1)
+
+ hole_first = last + 1
+
+ if hole_first < start + count:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (hole_first, start + count - 1))
+ yield (hole_first, start + count - 1)
+
+
+def filemap(image, log=None):
+ """
+ Create and return an instance of a Filemap class - 'FilemapFiemap' or
+ 'FilemapSeek', depending on what the system we run on supports. If the
+ FIEMAP ioctl is supported, an instance of the 'FilemapFiemap' class is
+ returned. Otherwise, if 'SEEK_HOLE' is supported an instance of the
+ 'FilemapSeek' class is returned. If none of these are supported, the
+ function generates an 'Error' type exception.
+ """
+
+ try:
+ return FilemapFiemap(image, log)
+ except ErrorNotSupp:
+ return FilemapSeek(image, log)
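For a quick standalone check, the same SEEK_HOLE trick that FilemapSeek relies
on can be used directly; the constants below are the Linux values the module
hardcodes, and the snippet assumes a file system that actually supports hole
detection:

::

    import os

    SEEK_DATA = 3   # seek to the next data region
    SEEK_HOLE = 4   # seek to the next hole

    def first_hole_offset(path):
        """Return the offset of the first hole; for a fully mapped file this
        is simply the file size (the implicit hole at EOF)."""
        with open(path, "rb") as f:
            return os.lseek(f.fileno(), 0, SEEK_HOLE)

    # A sparse image usually reports a hole well before EOF, which is exactly
    # what makes bmap-assisted flashing worthwhile.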
diff --git a/plugins/imager/raw_plugin.py b/plugins/imager/raw_plugin.py
new file mode 100755
index 0000000..09a9714
--- /dev/null
+++ b/plugins/imager/raw_plugin.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import shutil
+import re
+import tempfile
+
+from mic import chroot, msger, rt_util
+from mic.utils import misc, fs_related, errors, runner
+from mic.conf import configmgr
+from mic.plugin import pluginmgr
+from mic.utils.partitionedfs import PartitionedMount
+
+import mic.imager.raw as raw
+
+from mic.pluginbase import ImagerPlugin
+class RawPlugin(ImagerPlugin):
+ name = 'raw'
+
+ @classmethod
+ def do_create(self, args):
+ """${cmd_name}: create raw image
+
+ Usage:
+ ${name} ${cmd_name} <ksfile> [OPTS]
+
+ ${cmd_option_list}
+ """
+
+ creatoropts = configmgr.create
+ ksconf = args.ksfile
+
+ if creatoropts['runtime'] == "bootstrap":
+ configmgr._ksconf = ksconf
+ rt_util.bootstrap_mic()
+
+ recording_pkgs = []
+ if len(creatoropts['record_pkgs']) > 0:
+ recording_pkgs = creatoropts['record_pkgs']
+
+ if creatoropts['release'] is not None:
+ if 'name' not in recording_pkgs:
+ recording_pkgs.append('name')
+ if 'vcs' not in recording_pkgs:
+ recording_pkgs.append('vcs')
+
+ configmgr._ksconf = ksconf
+
+ # try to find the pkgmgr
+ pkgmgr = None
+ backends = pluginmgr.get_plugins('backend')
+ if 'auto' == creatoropts['pkgmgr']:
+ for key in configmgr.prefer_backends:
+ if key in backends:
+ pkgmgr = backends[key]
+ break
+ else:
+ for key in backends.keys():
+ if key == creatoropts['pkgmgr']:
+ pkgmgr = backends[key]
+ break
+
+ if not pkgmgr:
+ raise errors.CreatorError("Can't find backend: %s, "
+ "available choices: %s" %
+ (creatoropts['pkgmgr'],
+ ','.join(backends.keys())))
+
+ creator = raw.RawImageCreator(creatoropts, pkgmgr, args.compress_image,
+ args.generate_bmap, args.fstab_entry)
+
+ if len(recording_pkgs) > 0:
+ creator._recording_pkgs = recording_pkgs
+
+ images = ["%s-%s.raw" % (creator.name, disk_name)
+ for disk_name in creator.get_disk_names()]
+ self.check_image_exists(creator.destdir,
+ creator.pack_to,
+ images,
+ creatoropts['release'])
+
+ try:
+ creator.check_depend_tools()
+ creator.mount(None, creatoropts["cachedir"])
+ creator.install()
+ creator.configure(creatoropts["repomd"])
+ creator.copy_kernel()
+ creator.unmount()
+ creator.generate_bmap()
+ creator.package(creatoropts["destdir"])
+ creator.create_manifest()
+ if creatoropts['release'] is not None:
+ creator.release_output(ksconf, creatoropts['destdir'], creatoropts['release'])
+ creator.print_outimage_info()
+
+ except errors.CreatorError:
+ raise
+ finally:
+ creator.cleanup()
+
+ msger.info("Finished.")
+ return 0
+
+ @classmethod
+ def do_chroot(cls, target, cmd=[]):
+ img = target
+ imgsize = misc.get_file_size(img) * 1024L * 1024L
+ partedcmd = fs_related.find_binary_path("parted")
+ disk = fs_related.SparseLoopbackDisk(img, imgsize)
+ imgmnt = misc.mkdtemp()
+ imgloop = PartitionedMount(imgmnt, skipformat = True)
+ imgloop.add_disk('/dev/sdb', disk)
+ img_fstype = "ext3"
+
+ msger.info("Partition Table:")
+ partnum = []
+ for line in runner.outs([partedcmd, "-s", img, "print"]).splitlines():
+ # do not strip() here, to keep the original line output
+ if "Number" in line:
+ msger.raw(line)
+ if line.strip() and line.strip()[0].isdigit():
+ partnum.append(line.strip()[0])
+ msger.raw(line)
+
+ rootpart = None
+ if len(partnum) > 1:
+ rootpart = msger.choice("please choose root partition", partnum)
+
+ # Check the partitions from raw disk.
+ # if a root partition was chosen, mark it as mounted
+ if rootpart:
+ root_mounted = True
+ else:
+ root_mounted = False
+ partition_mounts = 0
+ for line in runner.outs([ partedcmd, "-s", img, "unit", "B", "print" ]).splitlines():
+ line = line.strip()
+
+ # Lines that start with a number are the partitions;
+ # because parted output can be localized, we can't rely on any text labels.
+ if not line or not line[0].isdigit():
+ continue
+
+ # Some values have an extra ',' as a list separator.
+ line = line.replace(",","")
+
+ # Example of parted output lines that are handled:
+ # Number Start End Size Type File system Flags
+ # 1 512B 3400000511B 3400000000B primary
+ # 2 3400531968B 3656384511B 255852544B primary linux-swap(v1)
+ # 3 3656384512B 3720347647B 63963136B primary fat16 boot, lba
+
+ partition_info = re.split("\s+", line)
+
+ size = partition_info[3].split("B")[0]
+
+ if len(partition_info) < 6 or partition_info[5] in ["boot"]:
+ # No filesystem can be found from partition line. Assuming
+ # btrfs, because that is the only MeeGo fs that parted does
+ # not recognize properly.
+ # TODO: Can we make better assumption?
+ fstype = "btrfs"
+ elif partition_info[5] in [ "ext2", "ext3", "ext4", "btrfs" ]:
+ fstype = partition_info[5]
+ elif partition_info[5] in [ "fat16", "fat32" ]:
+ fstype = "vfat"
+ elif "swap" in partition_info[5]:
+ fstype = "swap"
+ else:
+ raise errors.CreatorError("Could not recognize partition fs type '%s'." %
+ partition_info[5])
+
+ if rootpart and rootpart == line[0]:
+ mountpoint = '/'
+ elif not root_mounted and fstype in [ "ext2", "ext3", "ext4", "btrfs" ]:
+ # TODO: Check that this is actually the valid root partition from /etc/fstab
+ mountpoint = "/"
+ root_mounted = True
+ elif fstype == "swap":
+ mountpoint = "swap"
+ else:
+ # TODO: Assign better mount points for the rest of the partitions.
+ partition_mounts += 1
+ mountpoint = "/media/partition_%d" % partition_mounts
+
+ if "boot" in partition_info:
+ boot = True
+ else:
+ boot = False
+
+ msger.verbose("Size: %s Bytes, fstype: %s, mountpoint: %s, boot: %s" %
+ (size, fstype, mountpoint, boot))
+ # TODO: add_partition should take bytes as size parameter.
+ imgloop.add_partition((int)(size)/1024/1024, "/dev/sdb", mountpoint,
+ fstype = fstype, boot = boot)
+
+ try:
+ imgloop.mount()
+
+ except errors.MountError:
+ imgloop.cleanup()
+ raise
+
+ try:
+ if len(cmd) != 0:
+ cmdline = ' '.join(cmd)
+ else:
+ cmdline = "/bin/bash"
+ envcmd = fs_related.find_binary_inchroot("env", imgmnt)
+ if envcmd:
+ cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
+ chroot.chroot(imgmnt, None, cmdline)
+ except:
+ raise errors.CreatorError("Failed to chroot to %s." %img)
+ finally:
+ chroot.cleanup_after_chroot("img", imgloop, None, imgmnt)
+
+ @classmethod
+ def do_unpack(cls, srcimg):
+ srcimgsize = (misc.get_file_size(srcimg)) * 1024L * 1024L
+ srcmnt = misc.mkdtemp("srcmnt")
+ disk = fs_related.SparseLoopbackDisk(srcimg, srcimgsize)
+ srcloop = PartitionedMount(srcmnt, skipformat = True)
+
+ srcloop.add_disk('/dev/sdb', disk)
+ srcloop.add_partition(srcimgsize/1024/1024, "/dev/sdb", "/", "ext3", boot=False)
+ try:
+ srcloop.mount()
+
+ except errors.MountError:
+ srcloop.cleanup()
+ raise
+
+ image = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), "target.img")
+ args = ['dd', "if=%s" % srcloop.partitions[0]['device'], "of=%s" % image]
+
+ msger.info("`dd` image ...")
+ rc = runner.show(args)
+ srcloop.cleanup()
+ shutil.rmtree(os.path.dirname(srcmnt), ignore_errors = True)
+
+ if rc != 0:
+ raise errors.CreatorError("Failed to dd")
+ else:
+ return image
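The partition parsing in do_chroot() above boils down to splitting the
'parted -s <img> unit B print' output on whitespace; here is a standalone
sketch using one of the example lines quoted in the comments:

::

    import re

    line = "3      3656384512B 3720347647B  63963136B   primary fat16        boot, lba"
    line = line.replace(",", "")             # drop list separators
    fields = re.split(r"\s+", line)

    size_bytes = int(fields[3].rstrip("B"))  # 63963136
    fstype = fields[5]                       # 'fat16' -> mounted as vfat
    boot = "boot" in fields                  # True for this partition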
diff --git a/tools/mic b/tools/mic
index cb527f8..c54cb83 100755
--- a/tools/mic
+++ b/tools/mic
@@ -147,6 +147,19 @@ def create_parser(parser):
qcow_parser = subparsers.add_parser('qcow', parents=[parent_parser], help='create qcow image')
+ raw_parser = subparsers.add_parser('raw', parents=[parent_parser], help='create raw image')
+
+ raw_parser.add_argument("--compress-disk-image", dest="compress_image",
+ choices=("gz", "bz2"), default=None,
+ help="Same with --compress-image")
+ raw_parser.add_argument("--compress-image", dest="compress_image",
+ choices=("gz", "bz2"), default = None,
+ help="Compress all raw images before package")
+ raw_parser.add_argument("--generate-bmap", action="store_true", default = None,
+ help="also generate the block map file")
+ raw_parser.add_argument("--fstab-entry", dest="fstab_entry", choices=("name", "uuid"), default="uuid",
+ help="Set fstab entry, 'name' means using device names, "
+ "'uuid' means using filesystem uuid")
return parser
def main(argv):
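A rough sketch of how the new raw sub-command options parse, using a stripped
down argparse parser with the same arguments as the ones added above (the real
tool wires these into create_parser() together with the shared parent parser):

::

    import argparse

    parser = argparse.ArgumentParser(prog="mic create raw")
    parser.add_argument("ksfile")
    parser.add_argument("--compress-image", dest="compress_image",
                        choices=("gz", "bz2"), default=None)
    parser.add_argument("--generate-bmap", action="store_true", default=None)
    parser.add_argument("--fstab-entry", dest="fstab_entry",
                        choices=("name", "uuid"), default="uuid")

    args = parser.parse_args(["tizen.ks", "--compress-image=gz",
                              "--generate-bmap"])
    # args.compress_image == 'gz', args.generate_bmap is True,
    # args.fstab_entry == 'uuid' (the default)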