diff options
46 files changed, 11459 insertions, 0 deletions
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..23a4b5c --- /dev/null +++ b/Makefile @@ -0,0 +1,35 @@ +PYTHON ?= python +VERSION = $(shell cat VERSION) +TAGVER = $(shell cat VERSION | sed -e "s/\([0-9\.]*\).*/\1/") + +PKGNAME = micng + +ifeq ($(VERSION), $(TAGVER)) + TAG = $(TAGVER) +else + TAG = "HEAD" +endif + + +all: + $(PYTHON) setup.py build + +dist-bz2: + git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(TAG) | \ + bzip2 > $(PKGNAME)-$(VERSION).tar.bz2 + +dist-gz: + git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(TAG) | \ + gzip > $(PKGNAME)-$(VERSION).tar.gz + +install: all + $(PYTHON) setup.py install --root=${DESTDIR} + +develop: all + $(PYTHON) setup.py develop + +clean: + rm -f tools/*.py[co] + rm -rf *.egg-info + rm -rf build/ + rm -rf dist/ @@ -0,0 +1 @@ +0.1git diff --git a/distfiles/micng.conf b/distfiles/micng.conf new file mode 100644 index 0000000..0bc4a3f --- /dev/null +++ b/distfiles/micng.conf @@ -0,0 +1,13 @@ +[main] +cachedir= /var/tmp/cache +tmpdir= /var/tmp +outdir= . +distro_name=MeeGo +#proxy=http://proxy.yourcompany.com:8080/ +#no_proxy=localhost,127.0.0.0/8,.yourcompany.com +format=livecd +default_ks=default.ks +use_comps=1 + +#run mode: 0 - legacy, 1 - bootstrap +run_mode=0 diff --git a/micng/__init__.py b/micng/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/micng/__init__.py diff --git a/micng/__version__.py b/micng/__version__.py new file mode 100644 index 0000000..aa9bee7 --- /dev/null +++ b/micng/__version__.py @@ -0,0 +1 @@ +VERSION = "0.1git" diff --git a/micng/chroot.py b/micng/chroot.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/micng/chroot.py diff --git a/micng/configmgr.py b/micng/configmgr.py new file mode 100644 index 0000000..6a35c21 --- /dev/null +++ b/micng/configmgr.py @@ -0,0 +1,87 @@ +#!/usr/bin/python -t + +import os +import micng.utils as utils + +DEFAULT_OUTDIR='.' 
+DEFAULT_TMPDIR='/tmp' +DEFAULT_CACHE='/var/tmp' +DEFAULT_GSITECONF='/etc/micng/micng.conf' +DEFAULT_USITECONF='~/.micng.conf' + +class ConfigMgr(object): + def __init__(self, siteconf=None, ksfile=None): + self.outdir = DEFAULT_OUTDIR + self.tmpdir = DEFAULT_TMPDIR + self.cache = DEFAULT_CACHE + self.siteconf = siteconf + self.name = 'meego' + self.ksfile = ksfile + self.kickstart = None + self.ksrepos = None + self.repometadata = None + self.init_siteconf(self.siteconf) + self.init_kickstart(self.ksfile) + + def init_siteconf(self, siteconf = None): + from ConfigParser import SafeConfigParser + siteconf_parser = SafeConfigParser() + siteconf_files = [DEFAULT_GSITECONF, DEFAULT_USITECONF] + + if siteconf: + self.siteconf = siteconf + siteconf_files = [self.siteconf] + siteconf_parser.read(siteconf_files) + + for option in siteconf_parser.options('main'): + value = siteconf_parser.get('main', option) + setattr(self, option, value) + + def init_kickstart(self, ksfile=None): + if not ksfile: + return + self.ksfile = ksfile + try: + self.kickstart = utils.kickstart.read_kickstart(self.ksfile) + self.ksrepos = utils.misc.get_repostrs_from_ks(self.kickstart) + print "retrieving repo metadata..." 
+ self.repometadata = utils.misc.get_metadata_from_repos(self.ksrepos, self.cache) + except OSError, e: + raise Exception("failed to create image: %s" % e) + except Exception, e: + raise Exception("unable to load kickstart file '%s': %s" % (self.ksfile, e)) + + + def setProperty(self, name, value): + if not hasattr(self, name): + return None + #print ">>", name, value + if name == 'ksfile': + self.init_kickstart(value) + return True + if name == 'siteconf': + self.init_siteconf(value) + return True + return setattr(self, name, value) + + def getProperty(self, name): + if not hasattr(self, name): + return None + return getattr(self, name) + +configmgr = ConfigMgr() + +def getConfigMgr(): + return configmgr + +def setProperty(cinfo, name): + if not isinstance(cinfo, ConfigMgr): + return None + if not hasattr(cinfo, name): + return None + +def getProperty(cinfo, name): + if not isinstance(cinfo, ConfigMgr): + return None + if not hasattr(cinfo, name): + return None diff --git a/micng/convertor.py b/micng/convertor.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/micng/convertor.py diff --git a/micng/imager/BaseImageCreator.py b/micng/imager/BaseImageCreator.py new file mode 100644 index 0000000..a5862c6 --- /dev/null +++ b/micng/imager/BaseImageCreator.py @@ -0,0 +1,1603 @@ +# +# creator.py : ImageCreator and LoopImageCreator base classes +# +# Copyright 2007, Red Hat Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +import os +import os.path +import stat +import sys +import tempfile +import shutil +import logging +import subprocess +import re +import tarfile +import glob + +import rpm + +from micng.utils.errors import * +from micng.utils.fs_related import * +from micng.utils import kickstart +from micng.utils import pkgmanagers +from micng.utils.rpmmisc import * +from micng.utils.misc import * + +FSLABEL_MAXLEN = 32 +"""The maximum string length supported for LoopImageCreator.fslabel.""" + +class ImageCreator(object): + """Installs a system to a chroot directory. + + ImageCreator is the simplest creator class available; it will install and + configure a system image according to the supplied kickstart file. + + e.g. + + import micng.imgcreate as imgcreate + ks = imgcreate.read_kickstart("foo.ks") + imgcreate.ImageCreator(ks, "foo").create() + + """ + + def __init__(self, ks, name): + """Initialize an ImageCreator instance. + + ks -- a pykickstart.KickstartParser instance; this instance will be + used to drive the install by e.g. providing the list of packages + to be installed, the system configuration and %post scripts + + name -- a name for the image; used for e.g. 
image filenames or + filesystem labels + + """ + + """ Initialize package managers """ +#package plugin manager + self.pkgmgr = pkgmanagers.pkgManager() + self.pkgmgr.load_pkg_managers() + + self.ks = ks + """A pykickstart.KickstartParser instance.""" + + self.name = name + """A name for the image.""" + + self.distro_name = "MeeGo" + + """Output image file names""" + self.outimage = [] + + """A flag to generate checksum""" + self._genchecksum = False + + self.tmpdir = "/var/tmp" + """The directory in which all temporary files will be created.""" + + self.cachedir = None + + self._alt_initrd_name = None + + self.__builddir = None + self.__bindmounts = [] + + """ Contains the compression method that is used to compress + the disk image after creation, e.g., bz2. + This value is set with compression_method function. """ + self.__img_compression_method = None + + # dependent commands to check + self._dep_checks = ["ls", "bash", "cp", "echo", "modprobe", "passwd"] + + self._recording_pkgs = None + + self._include_src = None + + self._local_pkgs_path = None + + # available size in root fs, init to 0 + self._root_fs_avail = 0 + + # target arch for non-x86 image + self.target_arch = None + + """ Name of the disk image file that is created. 
""" + self._img_name = None + + """ Image format """ + self.image_format = None + + """ Save qemu emulator file name in order to clean up it finally """ + self.qemu_emulator = None + + """ No ks provided when called by convertor, so skip the dependency check """ + if self.ks: + """ If we have btrfs partition we need to check that we have toosl for those """ + for part in self.ks.handler.partition.partitions: + if part.fstype and part.fstype == "btrfs": + self._dep_checks.append("mkfs.btrfs") + break + + def set_target_arch(self, arch): + if arch not in arches.keys(): + return False + self.target_arch = arch + if self.target_arch.startswith("arm"): + for dep in self._dep_checks: + if dep == "extlinux": + self._dep_checks.remove(dep) + + if not os.path.exists("/usr/bin/qemu-arm") or not is_statically_linked("/usr/bin/qemu-arm"): + self._dep_checks.append("qemu-arm-static") + + if os.path.exists("/proc/sys/vm/vdso_enabled"): + vdso_fh = open("/proc/sys/vm/vdso_enabled","r") + vdso_value = vdso_fh.read().strip() + vdso_fh.close() + if (int)(vdso_value) == 1: + print "\n= WARNING =" + print "vdso is enabled on your host, which might cause problems with arm emulations." + print "You can disable vdso with following command before starting image build:" + print "echo 0 | sudo tee /proc/sys/vm/vdso_enabled" + print "= WARNING =\n" + + return True + + + def __del__(self): + self.cleanup() + + # + # Properties + # + def __get_instroot(self): + if self.__builddir is None: + raise CreatorError("_instroot is not valid before calling mount()") + return self.__builddir + "/install_root" + _instroot = property(__get_instroot) + """The location of the install root directory. + + This is the directory into which the system is installed. Subclasses may + mount a filesystem image here or copy files to/from here. + + Note, this directory does not exist before ImageCreator.mount() is called. + + Note also, this is a read-only attribute. 
+ + """ + + def __get_outdir(self): + if self.__builddir is None: + raise CreatorError("_outdir is not valid before calling mount()") + return self.__builddir + "/out" + _outdir = property(__get_outdir) + """The staging location for the final image. + + This is where subclasses should stage any files that are part of the final + image. ImageCreator.package() will copy any files found here into the + requested destination directory. + + Note, this directory does not exist before ImageCreator.mount() is called. + + Note also, this is a read-only attribute. + + """ + + # + # Hooks for subclasses + # + def _mount_instroot(self, base_on = None): + """Mount or prepare the install root directory. + + This is the hook where subclasses may prepare the install root by e.g. + mounting creating and loopback mounting a filesystem image to + _instroot. + + There is no default implementation. + + base_on -- this is the value passed to mount() and can be interpreted + as the subclass wishes; it might e.g. be the location of + a previously created ISO containing a system image. + + """ + pass + + def _unmount_instroot(self): + """Undo anything performed in _mount_instroot(). + + This is the hook where subclasses must undo anything which was done + in _mount_instroot(). For example, if a filesystem image was mounted + onto _instroot, it should be unmounted here. + + There is no default implementation. + + """ + pass + + def _create_bootconfig(self): + """Configure the image so that it's bootable. + + This is the hook where subclasses may prepare the image for booting by + e.g. creating an initramfs and bootloader configuration. + + This hook is called while the install root is still mounted, after the + packages have been installed and the kickstart configuration has been + applied, but before the %post scripts have been executed. + + There is no default implementation. + + """ + pass + + def _stage_final_image(self): + """Stage the final system image in _outdir. 
+ + This is the hook where subclasses should place the image in _outdir + so that package() can copy it to the requested destination directory. + + By default, this moves the install root into _outdir. + + """ + shutil.move(self._instroot, self._outdir + "/" + self.name) + + def get_installed_packages(self): + return self._pkgs_content.keys() + + def _save_recording_pkgs(self, destdir): + """Save the list or content of installed packages to file. + """ + if self._recording_pkgs not in ('content', 'name'): + return + + pkgs = self._pkgs_content.keys() + pkgs.sort() # inplace op + + # save package name list anyhow + if not os.path.exists(destdir): + makedirs(destdir) + + namefile = os.path.join(destdir, self.name + '-pkgs.txt') + f = open(namefile, "w") + content = '\n'.join(pkgs) + f.write(content) + f.close() + self.outimage.append(namefile); + + # if 'content', save more details + if self._recording_pkgs == 'content': + contfile = os.path.join(destdir, self.name + '-pkgs-content.txt') + f = open(contfile, "w") + + for pkg in pkgs: + content = pkg + '\n' + + pkgcont = self._pkgs_content[pkg] + items = [] + if pkgcont.has_key('dir'): + items = map(lambda x:x+'/', pkgcont['dir']) + if pkgcont.has_key('file'): + items.extend(pkgcont['file']) + + if items: + content += ' ' + content += '\n '.join(items) + content += '\n' + + content += '\n' + f.write(content) + f.close() + self.outimage.append(contfile) + + def _get_required_packages(self): + """Return a list of required packages. + + This is the hook where subclasses may specify a set of packages which + it requires to be installed. + + This returns an empty list by default. + + Note, subclasses should usually chain up to the base class + implementation of this hook. + + """ + return [] + + def _get_excluded_packages(self): + """Return a list of excluded packages. + + This is the hook where subclasses may specify a set of packages which + it requires _not_ to be installed. + + This returns an empty list by default. 
+ + Note, subclasses should usually chain up to the base class + implementation of this hook. + + """ + excluded_packages = [] + for rpm_path in self._get_local_packages(): + rpm_name = os.path.basename(rpm_path) + package_name = splitFilename(rpm_name)[0] + excluded_packages += [package_name] + return excluded_packages + + def _get_local_packages(self): + """Return a list of rpm path to be local installed. + + This is the hook where subclasses may specify a set of rpms which + it requires to be installed locally. + + This returns an empty list by default. + + Note, subclasses should usually chain up to the base class + implementation of this hook. + + """ + if self._local_pkgs_path: + if os.path.isdir(self._local_pkgs_path): + return glob.glob( + os.path.join(self._local_pkgs_path, '*.rpm')) + elif os.path.splitext(self._local_pkgs_path)[-1] == '.rpm': + return [self._local_pkgs_path] + + return [] + + def _get_fstab(self): + """Return the desired contents of /etc/fstab. + + This is the hook where subclasses may specify the contents of + /etc/fstab by returning a string containing the desired contents. + + A sensible default implementation is provided. + + """ + s = "/dev/root / %s %s 0 0\n" % (self._fstype, "defaults,noatime" if not self._fsopts else self._fsopts) + s += self._get_fstab_special() + return s + + def _get_fstab_special(self): + s = "devpts /dev/pts devpts gid=5,mode=620 0 0\n" + s += "tmpfs /dev/shm tmpfs defaults 0 0\n" + s += "proc /proc proc defaults 0 0\n" + s += "sysfs /sys sysfs defaults 0 0\n" + return s + + def _get_post_scripts_env(self, in_chroot): + """Return an environment dict for %post scripts. + + This is the hook where subclasses may specify some environment + variables for %post scripts by return a dict containing the desired + environment. + + By default, this returns an empty dict. + + in_chroot -- whether this %post script is to be executed chroot()ed + into _instroot. 
+ + """ + return {} + + def __get_imgname(self): + return self.name + _name = property(__get_imgname) + """The name of the image file. + + """ + + def _get_kernel_versions(self): + """Return a dict detailing the available kernel types/versions. + + This is the hook where subclasses may override what kernel types and + versions should be available for e.g. creating the booloader + configuration. + + A dict should be returned mapping the available kernel types to a list + of the available versions for those kernels. + + The default implementation uses rpm to iterate over everything + providing 'kernel', finds /boot/vmlinuz-* and returns the version + obtained from the vmlinuz filename. (This can differ from the kernel + RPM's n-v-r in the case of e.g. xen) + + """ + def get_version(header): + version = None + for f in header['filenames']: + if f.startswith('/boot/vmlinuz-'): + version = f[14:] + return version + + ts = rpm.TransactionSet(self._instroot) + + ret = {} + for header in ts.dbMatch('provides', 'kernel'): + version = get_version(header) + if version is None: + continue + + name = header['name'] + if not name in ret: + ret[name] = [version] + elif not version in ret[name]: + ret[name].append(version) + + return ret + + # + # Helpers for subclasses + # + def _do_bindmounts(self): + """Mount various system directories onto _instroot. + + This method is called by mount(), but may also be used by subclasses + in order to re-mount the bindmounts after modifying the underlying + filesystem. + + """ + for b in self.__bindmounts: + b.mount() + + def _undo_bindmounts(self): + """Unmount the bind-mounted system directories from _instroot. + + This method is usually only called by unmount(), but may also be used + by subclasses in order to gain access to the filesystem obscured by + the bindmounts - e.g. in order to create device nodes on the image + filesystem. 
+ + """ + self.__bindmounts.reverse() + for b in self.__bindmounts: + b.unmount() + + def _chroot(self): + """Chroot into the install root. + + This method may be used by subclasses when executing programs inside + the install root e.g. + + subprocess.call(["/bin/ls"], preexec_fn = self.chroot) + + """ + os.chroot(self._instroot) + os.chdir("/") + + def _mkdtemp(self, prefix = "tmp-"): + """Create a temporary directory. + + This method may be used by subclasses to create a temporary directory + for use in building the final image - e.g. a subclass might create + a temporary directory in order to bundle a set of files into a package. + + The subclass may delete this directory if it wishes, but it will be + automatically deleted by cleanup(). + + The absolute path to the temporary directory is returned. + + Note, this method should only be called after mount() has been called. + + prefix -- a prefix which should be used when creating the directory; + defaults to "tmp-". + + """ + self.__ensure_builddir() + return tempfile.mkdtemp(dir = self.__builddir, prefix = prefix) + + def _mkstemp(self, prefix = "tmp-"): + """Create a temporary file. + + This method may be used by subclasses to create a temporary file + for use in building the final image - e.g. a subclass might need + a temporary location to unpack a compressed file. + + The subclass may delete this file if it wishes, but it will be + automatically deleted by cleanup(). + + A tuple containing a file descriptor (returned from os.open() and the + absolute path to the temporary directory is returned. + + Note, this method should only be called after mount() has been called. + + prefix -- a prefix which should be used when creating the file; + defaults to "tmp-". + + """ + self.__ensure_builddir() + return tempfile.mkstemp(dir = self.__builddir, prefix = prefix) + + def _mktemp(self, prefix = "tmp-"): + """Create a temporary file. + + This method simply calls _mkstemp() and closes the returned file + descriptor. 
+ + The absolute path to the temporary file is returned. + + Note, this method should only be called after mount() has been called. + + prefix -- a prefix which should be used when creating the file; + defaults to "tmp-". + + """ + + (f, path) = self._mkstemp(prefix) + os.close(f) + return path + + # + # Actual implementation + # + def __ensure_builddir(self): + if not self.__builddir is None: + return + + try: + self.__builddir = tempfile.mkdtemp(dir = self.tmpdir, + prefix = "imgcreate-") + except OSError, (err, msg): + raise CreatorError("Failed create build directory in %s: %s" % + (self.tmpdir, msg)) + + def get_cachedir(self, cachedir = None): + if self.cachedir: + return self.cachedir + + self.__ensure_builddir() + if cachedir: + self.cachedir = cachedir + else: + self.cachedir = self.__builddir + "/yum-cache" + makedirs(self.cachedir) + return self.cachedir + + def __sanity_check(self): + """Ensure that the config we've been given is sane.""" + if not (kickstart.get_packages(self.ks) or + kickstart.get_groups(self.ks)): + raise CreatorError("No packages or groups specified") + + kickstart.convert_method_to_repo(self.ks) + + if not kickstart.get_repos(self.ks): + raise CreatorError("No repositories specified") + + def __write_fstab(self): + fstab = open(self._instroot + "/etc/fstab", "w") + fstab.write(self._get_fstab()) + fstab.close() + + def __create_minimal_dev(self): + """Create a minimal /dev so that we don't corrupt the host /dev""" + origumask = os.umask(0000) + devices = (('null', 1, 3, 0666), + ('urandom',1, 9, 0666), + ('random', 1, 8, 0666), + ('full', 1, 7, 0666), + ('ptmx', 5, 2, 0666), + ('tty', 5, 0, 0666), + ('zero', 1, 5, 0666)) + links = (("/proc/self/fd", "/dev/fd"), + ("/proc/self/fd/0", "/dev/stdin"), + ("/proc/self/fd/1", "/dev/stdout"), + ("/proc/self/fd/2", "/dev/stderr")) + + for (node, major, minor, perm) in devices: + if not os.path.exists(self._instroot + "/dev/" + node): + os.mknod(self._instroot + "/dev/" + node, perm | 
stat.S_IFCHR, os.makedev(major,minor)) + for (src, dest) in links: + if not os.path.exists(self._instroot + dest): + os.symlink(src, self._instroot + dest) + os.umask(origumask) + + + def mount(self, base_on = None, cachedir = None): + """Setup the target filesystem in preparation for an install. + + This function sets up the filesystem which the ImageCreator will + install into and configure. The ImageCreator class merely creates an + install root directory, bind mounts some system directories (e.g. /dev) + and writes out /etc/fstab. Other subclasses may also e.g. create a + sparse file, format it and loopback mount it to the install root. + + base_on -- a previous install on which to base this install; defaults + to None, causing a new image to be created + + cachedir -- a directory in which to store the Yum cache; defaults to + None, causing a new cache to be created; by setting this + to another directory, the same cache can be reused across + multiple installs. + + """ + self.__ensure_builddir() + + makedirs(self._instroot) + makedirs(self._outdir) + + self._mount_instroot(base_on) + + for d in ("/dev/pts", "/etc", "/boot", "/var/log", "/var/cache/yum", "/sys", "/proc", "/usr/bin"): + makedirs(self._instroot + d) + + if self.target_arch and self.target_arch.startswith("arm"): + self.qemu_emulator = setup_qemu_emulator(self._instroot, self.target_arch) + + self.get_cachedir(cachedir) + + # bind mount system directories into _instroot + for (f, dest) in [("/sys", None), ("/proc", None), ("/proc/sys/fs/binfmt_misc", None), + ("/dev/pts", None), + (self.get_cachedir(), "/var/cache/yum")]: + self.__bindmounts.append(BindChrootMount(f, self._instroot, dest)) + + + self._do_bindmounts() + + self.__create_minimal_dev() + + if os.path.exists(self._instroot + "/etc/mtab"): + os.unlink(self._instroot + "/etc/mtab") + os.symlink("../proc/mounts", self._instroot + "/etc/mtab") + + self.__write_fstab() + + # get size of available space in 'instroot' fs + self._root_fs_avail 
= get_filesystem_avail(self._instroot) + + def unmount(self): + """Unmounts the target filesystem. + + The ImageCreator class detaches the system from the install root, but + other subclasses may also detach the loopback mounted filesystem image + from the install root. + + """ + try: + os.unlink(self._instroot + "/etc/mtab") + if self.qemu_emulator: + os.unlink(self._instroot + self.qemu_emulator) + """ Clean up yum garbage """ + instroot_pdir = os.path.dirname(self._instroot + self._instroot) + if os.path.exists(instroot_pdir): + shutil.rmtree(instroot_pdir, ignore_errors = True) + except OSError: + pass + + + self._undo_bindmounts() + + self._unmount_instroot() + + def cleanup(self): + """Unmounts the target filesystem and deletes temporary files. + + This method calls unmount() and then deletes any temporary files and + directories that were created on the host system while building the + image. + + Note, make sure to call this method once finished with the creator + instance in order to ensure no stale files are left on the host e.g.: + + creator = ImageCreator(ks, name) + try: + creator.create() + finally: + creator.cleanup() + + """ + if not self.__builddir: + return + + self.unmount() + + shutil.rmtree(self.__builddir, ignore_errors = True) + self.__builddir = None + + def __is_excluded_pkg(self, pkg): + if pkg in self._excluded_pkgs: + self._excluded_pkgs.remove(pkg) + return True + + for xpkg in self._excluded_pkgs: + if xpkg.endswith('*'): + if pkg.startswith(xpkg[:-1]): + return True + elif xpkg.startswith('*'): + if pkg.endswith(xpkg[1:]): + return True + + return None + + def __select_packages(self, pkg_manager): + skipped_pkgs = [] + for pkg in self._required_pkgs: + e = pkg_manager.selectPackage(pkg) + if e: + if kickstart.ignore_missing(self.ks): + skipped_pkgs.append(pkg) + elif self.__is_excluded_pkg(pkg): + skipped_pkgs.append(pkg) + else: + raise CreatorError("Failed to find package '%s' : %s" % + (pkg, e)) + + for pkg in skipped_pkgs: + 
logging.warn("Skipping missing package '%s'" % (pkg,)) + + def __select_groups(self, pkg_manager): + skipped_groups = [] + for group in self._required_groups: + e = pkg_manager.selectGroup(group.name, group.include) + if e: + if kickstart.ignore_missing(self.ks): + skipped_groups.append(group) + else: + raise CreatorError("Failed to find group '%s' : %s" % + (group.name, e)) + + for group in skipped_groups: + logging.warn("Skipping missing group '%s'" % (group.name,)) + + def __deselect_packages(self, pkg_manager): + for pkg in self._excluded_pkgs: + pkg_manager.deselectPackage(pkg) + + def __localinst_packages(self, pkg_manager): + for rpm_path in self._get_local_packages(): + pkg_manager.installLocal(rpm_path) + + def install(self, repo_urls = {}): + """Install packages into the install root. + + This function installs the packages listed in the supplied kickstart + into the install root. By default, the packages are installed from the + repository URLs specified in the kickstart. + + repo_urls -- a dict which maps a repository name to a repository URL; + if supplied, this causes any repository URLs specified in + the kickstart to be overridden. 
+ + """ + + + # initialize pkg list to install + #import pdb + #pdb.set_trace() + if self.ks: + self.__sanity_check() + + self._required_pkgs = \ + kickstart.get_packages(self.ks, self._get_required_packages()) + self._excluded_pkgs = \ + kickstart.get_excluded(self.ks, self._get_excluded_packages()) + self._required_groups = kickstart.get_groups(self.ks) + else: + self._required_pkgs = None + self._excluded_pkgs = None + self._required_groups = None + + yum_conf = self._mktemp(prefix = "yum.conf-") + + keep_record = None + if self._include_src: + keep_record = 'include_src' + if self._recording_pkgs in ('name', 'content'): + keep_record = self._recording_pkgs + + pkg_manager = self.get_pkg_manager(keep_record) + pkg_manager.setup(yum_conf, self._instroot) + + for repo in kickstart.get_repos(self.ks, repo_urls): + (name, baseurl, mirrorlist, inc, exc, proxy, proxy_username, proxy_password, debuginfo, source, gpgkey, disable) = repo + + yr = pkg_manager.addRepository(name, baseurl, mirrorlist, proxy, proxy_username, proxy_password, inc, exc) + + if kickstart.exclude_docs(self.ks): + rpm.addMacro("_excludedocs", "1") + rpm.addMacro("__file_context_path", "%{nil}") + if kickstart.inst_langs(self.ks) != None: + rpm.addMacro("_install_langs", kickstart.inst_langs(self.ks)) + + try: + try: + #import pdb + #pdb.set_trace() + self.__select_packages(pkg_manager) + self.__select_groups(pkg_manager) + self.__deselect_packages(pkg_manager) + self.__localinst_packages(pkg_manager) + + BOOT_SAFEGUARD = 256L * 1024 * 1024 # 256M + checksize = self._root_fs_avail + if checksize: + checksize -= BOOT_SAFEGUARD + if self.target_arch: + pkg_manager._add_prob_flags(rpm.RPMPROB_FILTER_IGNOREARCH) + pkg_manager.runInstall(checksize) + except CreatorError, e: + raise CreatorError("%s" % (e,)) + finally: + if keep_record: + self._pkgs_content = pkg_manager.getAllContent() + + pkg_manager.closeRpmDB() + pkg_manager.close() + os.unlink(yum_conf) + + # do some clean up to avoid lvm info 
leakage. this sucks. + for subdir in ("cache", "backup", "archive"): + lvmdir = self._instroot + "/etc/lvm/" + subdir + try: + for f in os.listdir(lvmdir): + os.unlink(lvmdir + "/" + f) + except: + pass + + def __run_post_scripts(self): + print "Running scripts" + for s in kickstart.get_post_scripts(self.ks): + (fd, path) = tempfile.mkstemp(prefix = "ks-script-", + dir = self._instroot + "/tmp") + + s.script = s.script.replace("\r", "") + os.write(fd, s.script) + os.close(fd) + os.chmod(path, 0700) + + env = self._get_post_scripts_env(s.inChroot) + + if not s.inChroot: + env["INSTALL_ROOT"] = self._instroot + env["IMG_NAME"] = self._name + preexec = None + script = path + else: + preexec = self._chroot + script = "/tmp/" + os.path.basename(path) + + try: + try: + subprocess.call([s.interp, script], + preexec_fn = preexec, env = env, stdout = sys.stdout, stderr = sys.stderr) + except OSError, (err, msg): + raise CreatorError("Failed to execute %%post script " + "with '%s' : %s" % (s.interp, msg)) + finally: + os.unlink(path) + + def __save_repo_keys(self, repodata): + if not repodata: + return None + gpgkeydir = "/etc/pki/rpm-gpg" + makedirs(self._instroot + gpgkeydir) + for repo in repodata: + if repo["repokey"]: + repokey = gpgkeydir + "/RPM-GPG-KEY-%s" % repo["name"] + shutil.copy(repo["repokey"], self._instroot + repokey) + + def configure(self, repodata = None): + """Configure the system image according to the kickstart. + + This method applies the (e.g. keyboard or network) configuration + specified in the kickstart and executes the kickstart %post scripts. + + If neccessary, it also prepares the image to be bootable by e.g. + creating an initrd and bootloader configuration. 
+ + """ + ksh = self.ks.handler + + try: + kickstart.LanguageConfig(self._instroot).apply(ksh.lang) + kickstart.KeyboardConfig(self._instroot).apply(ksh.keyboard) + kickstart.TimezoneConfig(self._instroot).apply(ksh.timezone) + #kickstart.AuthConfig(self._instroot).apply(ksh.authconfig) + kickstart.FirewallConfig(self._instroot).apply(ksh.firewall) + kickstart.RootPasswordConfig(self._instroot).apply(ksh.rootpw) + kickstart.UserConfig(self._instroot).apply(ksh.user) + kickstart.ServicesConfig(self._instroot).apply(ksh.services) + kickstart.XConfig(self._instroot).apply(ksh.xconfig) + kickstart.NetworkConfig(self._instroot).apply(ksh.network) + kickstart.RPMMacroConfig(self._instroot).apply(self.ks) + kickstart.DesktopConfig(self._instroot).apply(ksh.desktop) + self.__save_repo_keys(repodata) + kickstart.MoblinRepoConfig(self._instroot).apply(ksh.repo, repodata) + except: + print "Failed to apply configuration to image" + raise + + self._create_bootconfig() + self.__run_post_scripts() + + def launch_shell(self, launch): + """Launch a shell in the install root. + + This method is launches a bash shell chroot()ed in the install root; + this can be useful for debugging. + + """ + if launch: + print "Launching shell. Exit to continue." 
+ print "----------------------------------" + subprocess.call(["/bin/bash"], preexec_fn = self._chroot) + + def do_genchecksum(self, image_name): + if not self._genchecksum: + return + + """ Generate md5sum if /usr/bin/md5sum is available """ + if os.path.exists("/usr/bin/md5sum"): + p = subprocess.Popen(["/usr/bin/md5sum", "-b", image_name], + stdout=subprocess.PIPE) + (md5sum, errorstr) = p.communicate() + if p.returncode != 0: + logging.warning("Can't generate md5sum for image %s" % image_name) + else: + pattern = re.compile("\*.*$") + md5sum = pattern.sub("*" + os.path.basename(image_name), md5sum) + fd = open(image_name + ".md5sum", "w") + fd.write(md5sum) + fd.close() + self.outimage.append(image_name+".md5sum") + + def package(self, destdir = "."): + """Prepares the created image for final delivery. + + In its simplest form, this method merely copies the install root to the + supplied destination directory; other subclasses may choose to package + the image by e.g. creating a bootable ISO containing the image and + bootloader configuration. + + destdir -- the directory into which the final image should be moved; + this defaults to the current directory. + + """ + self._stage_final_image() + + if self.__img_compression_method: + if not self._img_name: + raise CreatorError("Image name not set.") + rc = None + img_location = os.path.join(self._outdir,self._img_name) + if self.__img_compression_method == "bz2": + bzip2 = find_binary_path('bzip2') + print "Compressing %s with bzip2. Please wait..." % img_location + rc = subprocess.call([bzip2, "-f", img_location]) + if rc: + raise CreatorError("Failed to compress image %s with %s." % (img_location, self.__img_compression_method)) + for bootimg in glob.glob(os.path.dirname(img_location) + "/*-boot.bin"): + print "Compressing %s with bzip2. Please wait..." % bootimg + rc = subprocess.call([bzip2, "-f", bootimg]) + if rc: + raise CreatorError("Failed to compress image %s with %s." 
+                                           % (bootimg, self.__img_compression_method))
+
+        if self._recording_pkgs:
+            self._save_recording_pkgs(destdir)
+
+        """ For image formats with two or multiple image files, it will be better to put them under a directory """
+        if self.image_format in ("raw", "vmdk", "vdi", "nand", "mrstnand"):
+            destdir = os.path.join(destdir, "%s-%s" % (self.name, self.image_format))
+            logging.debug("creating destination dir: %s" % destdir)
+            makedirs(destdir)
+
+        # Ensure all data is flushed to _outdir
+        synccmd = find_binary_path("sync")
+        subprocess.call([synccmd])
+
+        for f in os.listdir(self._outdir):
+            shutil.move(os.path.join(self._outdir, f),
+                        os.path.join(destdir, f))
+            self.outimage.append(os.path.join(destdir, f))
+            self.do_genchecksum(os.path.join(destdir, f))
+
+    def create(self):
+        """Install, configure and package an image.
+
+        This method is a utility method which creates an image by calling some
+        of the other methods in the following order - mount(), install(),
+        configure(), unmount() and package().
+ + """ + self.mount() + self.install() + self.configure() + self.unmount() + self.package() + + def print_outimage_info(self): + print "Your new image can be found here:" + self.outimage.sort() + for file in self.outimage: + print os.path.abspath(file) + + def check_depend_tools(self): + for tool in self._dep_checks: + find_binary_path(tool) + + def package_output(self, image_format, destdir = ".", package="none"): + if not package or package == "none": + return + + destdir = os.path.abspath(os.path.expanduser(destdir)) + (pkg, comp) = os.path.splitext(package) + if comp: + comp=comp.lstrip(".") + + if pkg == "tar": + if comp: + dst = "%s/%s-%s.tar.%s" % (destdir, self.name, image_format, comp) + else: + dst = "%s/%s-%s.tar" % (destdir, self.name, image_format) + print "creating %s" % dst + tar = tarfile.open(dst, "w:" + comp) + + for file in self.outimage: + print "adding %s to %s" % (file, dst) + tar.add(file, arcname=os.path.join("%s-%s" % (self.name, image_format), os.path.basename(file))) + if os.path.isdir(file): + shutil.rmtree(file, ignore_errors = True) + else: + os.remove(file) + + + tar.close() + + '''All the file in outimage has been packaged into tar.* file''' + self.outimage = [dst] + + def release_output(self, config, destdir, name, release): + self.outimage = create_release(config, destdir, name, self.outimage, release) + + def save_kernel(self, destdir): + if not os.path.exists(destdir): + makedirs(destdir) + for kernel in glob.glob("%s/boot/vmlinuz-*" % self._instroot): + kernelfilename = "%s/%s-%s" % (destdir, self.name, os.path.basename(kernel)) + shutil.copy(kernel, kernelfilename) + self.outimage.append(kernelfilename) + + def compress_disk_image(self, compression_method): + """ + With this you can set the method that is used to compress the disk + image after it is created. + """ + + if compression_method not in ('bz2'): + raise CreatorError("Given disk image compression method ('%s') is not valid." 
% (compression_method)) + + self.__img_compression_method = compression_method + + def set_pkg_manager(self, name): + self.pkgmgr.set_default_pkg_manager(name) + + def get_pkg_manager(self, recording_pkgs=None): + pkgmgr_instance = self.pkgmgr.get_default_pkg_manager() + if not pkgmgr_instance: + raise CreatorError("No package manager available") + return pkgmgr_instance(creator = self, recording_pkgs = recording_pkgs) + +class LoopImageCreator(ImageCreator): + """Installs a system into a loopback-mountable filesystem image. + + LoopImageCreator is a straightforward ImageCreator subclass; the system + is installed into an ext3 filesystem on a sparse file which can be + subsequently loopback-mounted. + + """ + + def __init__(self, ks, name, fslabel = None): + """Initialize a LoopImageCreator instance. + + This method takes the same arguments as ImageCreator.__init__() with + the addition of: + + fslabel -- A string used as a label for any filesystems created. + + """ + ImageCreator.__init__(self, ks, name) + + self.__fslabel = None + self.fslabel = fslabel + + self.__minsize_KB = 0 + self.__blocksize = 4096 + if self.ks: + self.__fstype = kickstart.get_image_fstype(self.ks, "ext3") + self.__fsopts = kickstart.get_image_fsopts(self.ks, "defaults,noatime") + else: + self.__fstype = None + self.__fsopts = None + + self.__instloop = None + self.__imgdir = None + + if self.ks: + self.__image_size = kickstart.get_image_size(self.ks, + 4096L * 1024 * 1024) + else: + self.__image_size = 0 + + self._img_name = self.name + ".img" + + def _set_fstype(self, fstype): + self.__fstype = fstype + + def _set_image_size(self, imgsize): + self.__image_size = imgsize + + # + # Properties + # + def __get_fslabel(self): + if self.__fslabel is None: + return self.name + else: + return self.__fslabel + def __set_fslabel(self, val): + if val is None: + self.__fslabel = None + else: + self.__fslabel = val[:FSLABEL_MAXLEN] + fslabel = property(__get_fslabel, __set_fslabel) + """A string used 
to label any filesystems created. + + Some filesystems impose a constraint on the maximum allowed size of the + filesystem label. In the case of ext3 it's 16 characters, but in the case + of ISO9660 it's 32 characters. + + mke2fs silently truncates the label, but mkisofs aborts if the label is too + long. So, for convenience sake, any string assigned to this attribute is + silently truncated to FSLABEL_MAXLEN (32) characters. + + """ + + def __get_image(self): + if self.__imgdir is None: + raise CreatorError("_image is not valid before calling mount()") + return self.__imgdir + "/meego.img" + _image = property(__get_image) + """The location of the image file. + + This is the path to the filesystem image. Subclasses may use this path + in order to package the image in _stage_final_image(). + + Note, this directory does not exist before ImageCreator.mount() is called. + + Note also, this is a read-only attribute. + + """ + + def __get_blocksize(self): + return self.__blocksize + def __set_blocksize(self, val): + if self.__instloop: + raise CreatorError("_blocksize must be set before calling mount()") + try: + self.__blocksize = int(val) + except ValueError: + raise CreatorError("'%s' is not a valid integer value " + "for _blocksize" % val) + _blocksize = property(__get_blocksize, __set_blocksize) + """The block size used by the image's filesystem. + + This is the block size used when creating the filesystem image. Subclasses + may change this if they wish to use something other than a 4k block size. + + Note, this attribute may only be set before calling mount(). + + """ + + def __get_fstype(self): + return self.__fstype + def __set_fstype(self, val): + if val != "ext2" and val != "ext3": + raise CreatorError("Unknown _fstype '%s' supplied" % val) + self.__fstype = val + _fstype = property(__get_fstype, __set_fstype) + """The type of filesystem used for the image. + + This is the filesystem type used when creating the filesystem image. 
+    Subclasses may change this if they wish to use something other than ext3.
+
+    Note, only ext2 and ext3 are currently supported.
+
+    Note also, this attribute may only be set before calling mount().
+
+    """
+
+    def __get_fsopts(self):
+        return self.__fsopts
+    def __set_fsopts(self, val):
+        self.__fsopts = val
+    _fsopts = property(__get_fsopts, __set_fsopts)
+    """Mount options of filesystem used for the image.
+
+    This can be specified by --fsoptions="xxx,yyy" in part command in
+    kickstart file.
+    """
+
+    #
+    # Helpers for subclasses
+    #
+    def _resparse(self, size = None):
+        """Rebuild the filesystem image to be as sparse as possible.
+
+        This method should be used by subclasses when staging the final image
+        in order to reduce the actual space taken up by the sparse image file
+        to be as little as possible.
+
+        This is done by resizing the filesystem to the minimal size (thereby
+        eliminating any space taken up by deleted files) and then resizing it
+        back to the supplied size.
+
+        size -- the size, in bytes, which the filesystem image should be
+                resized to after it has been minimized; this defaults to None,
+                causing the original size specified by the kickstart file to
+                be used (or 4GiB if not specified in the kickstart).
+ + """ + return self.__instloop.resparse(size) + + def _base_on(self, base_on): + shutil.copyfile(base_on, self._image) + + # + # Actual implementation + # + def _mount_instroot(self, base_on = None): + self.__imgdir = self._mkdtemp() + + if not base_on is None: + self._base_on(base_on) + + if self.__fstype in ("ext2", "ext3", "ext4"): + MyDiskMount = ExtDiskMount + elif self.__fstype == "btrfs": + MyDiskMount = BtrfsDiskMount + + self.__instloop = MyDiskMount(SparseLoopbackDisk(self._image, self.__image_size), + self._instroot, + self.__fstype, + self.__blocksize, + self.fslabel) + + try: + self.__instloop.mount() + except MountError, e: + raise CreatorError("Failed to loopback mount '%s' : %s" % + (self._image, e)) + + def _unmount_instroot(self): + if not self.__instloop is None: + self.__instloop.cleanup() + + def _stage_final_image(self): + self._resparse() + shutil.move(self._image, self._outdir + "/" + self._img_name) + +class LiveImageCreatorBase(LoopImageCreator): + """A base class for LiveCD image creators. + + This class serves as a base class for the architecture-specific LiveCD + image creator subclass, LiveImageCreator. + + LiveImageCreator creates a bootable ISO containing the system image, + bootloader, bootloader configuration, kernel and initramfs. + + """ + + def __init__(self, *args): + """Initialise a LiveImageCreator instance. + + This method takes the same arguments as ImageCreator.__init__(). + + """ + LoopImageCreator.__init__(self, *args) + + self.skip_compression = False + """Controls whether to use squashfs to compress the image.""" + + self.skip_minimize = False + """Controls whether an image minimizing snapshot should be created. + + This snapshot can be used when copying the system image from the ISO in + order to minimize the amount of data that needs to be copied; simply, + it makes it possible to create a version of the image's filesystem with + no spare space. 
+ + """ + + self.actasconvertor = False + """A flag which indicates i act as a convertor""" + + if self.ks: + self._timeout = kickstart.get_timeout(self.ks, 10) + else: + self._timeout = 10 + """The bootloader timeout from kickstart.""" + + if self.ks: + self._default_kernel = kickstart.get_default_kernel(self.ks, "kernel") + else: + self._default_kernel = None + """The default kernel type from kickstart.""" + + self.__isodir = None + + self.__modules = ["=ata", "sym53c8xx", "aic7xxx", "=usb", "=firewire", "=mmc", "=pcmcia", "mptsas"] + if self.ks: + self.__modules.extend(kickstart.get_modules(self.ks)) + + self._dep_checks.extend(["isohybrid", "unsquashfs", "mksquashfs", "dd", "genisoimage"]) + + # + # Hooks for subclasses + # + def _configure_bootloader(self, isodir): + """Create the architecture specific booloader configuration. + + This is the hook where subclasses must create the booloader + configuration in order to allow a bootable ISO to be built. + + isodir -- the directory where the contents of the ISO are to be staged + + """ + raise CreatorError("Bootloader configuration is arch-specific, " + "but not implemented for this arch!") + def _get_menu_options(self): + """Return a menu options string for syslinux configuration. + + """ + r = kickstart.get_menu_args(self.ks) + return r + + def _get_kernel_options(self): + """Return a kernel options string for bootloader configuration. + + This is the hook where subclasses may specify a set of kernel options + which should be included in the images bootloader configuration. + + A sensible default implementation is provided. + + """ + r = kickstart.get_kernel_args(self.ks) + if os.path.exists(self._instroot + "/usr/bin/rhgb") or \ + os.path.exists(self._instroot + "/usr/bin/plymouth"): + r += " rhgb" + return r + + def _get_mkisofs_options(self, isodir): + """Return the architecture specific mkisosfs options. + + This is the hook where subclasses may specify additional arguments to + mkisofs, e.g. 
to enable a bootable ISO to be built. + + By default, an empty list is returned. + + """ + return [] + + # + # Helpers for subclasses + # + def _has_checkisomd5(self): + """Check whether checkisomd5 is available in the install root.""" + def exists(instroot, path): + return os.path.exists(instroot + path) + + if (exists(self._instroot, "/usr/lib/moblin-installer-runtime/checkisomd5") or + exists(self._instroot, "/usr/bin/checkisomd5")): + if (os.path.exists("/usr/bin/implantisomd5") or + os.path.exists("/usr/lib/anaconda-runtime/implantisomd5")): + return True + + return False + + def _uncompress_squashfs(self, squashfsimg, outdir): + """Uncompress file system from squshfs image""" + unsquashfs = find_binary_path("unsquashfs") + args = [unsquashfs, "-d", outdir, squashfsimg ] + rc = subprocess.call(args) + if (rc != 0): + raise CreatorError("Failed to uncompress %s." % squashfsimg) + # + # Actual implementation + # + def _base_on(self, base_on): + """Support Image Convertor""" + if self.actasconvertor: + if os.path.exists(base_on) and not os.path.isfile(base_on): + ddcmd = find_binary_path("dd") + args = [ ddcmd, "if=%s" % base_on, "of=%s" % self._image ] + print "dd %s -> %s" % (base_on, self._image) + rc = subprocess.call(args) + if rc != 0: + raise CreatorError("Failed to dd from %s to %s" % (base_on, self._image)) + self._set_image_size(get_file_size(self._image) * 1024L * 1024L) + if os.path.isfile(base_on): + print "Copying file system..." 
+ shutil.copyfile(base_on, self._image) + self._set_image_size(get_file_size(self._image) * 1024L * 1024L) + return + + """helper function to extract ext3 file system from a live CD ISO""" + isoloop = DiskMount(LoopbackDisk(base_on, 0), self._mkdtemp()) + + try: + isoloop.mount() + except MountError, e: + raise CreatorError("Failed to loopback mount '%s' : %s" % + (base_on, e)) + + # legacy LiveOS filesystem layout support, remove for F9 or F10 + if os.path.exists(isoloop.mountdir + "/squashfs.img"): + squashimg = isoloop.mountdir + "/squashfs.img" + else: + squashimg = isoloop.mountdir + "/LiveOS/squashfs.img" + + tmpoutdir = self._mkdtemp() + # unsquashfs requires outdir mustn't exist + shutil.rmtree(tmpoutdir, ignore_errors = True) + self._uncompress_squashfs(squashimg, tmpoutdir) + + try: + # legacy LiveOS filesystem layout support, remove for F9 or F10 + if os.path.exists(tmpoutdir + "/os.img"): + os_image = tmpoutdir + "/os.img" + else: + os_image = tmpoutdir + "/LiveOS/ext3fs.img" + + if not os.path.exists(os_image): + raise CreatorError("'%s' is not a valid live CD ISO : neither " + "LiveOS/ext3fs.img nor os.img exist" % + base_on) + + print "Copying file system..." 
+ shutil.copyfile(os_image, self._image) + self._set_image_size(get_file_size(self._image) * 1024L * 1024L) + finally: + shutil.rmtree(tmpoutdir, ignore_errors = True) + isoloop.cleanup() + + def _mount_instroot(self, base_on = None): + LoopImageCreator._mount_instroot(self, base_on) + self.__write_initrd_conf(self._instroot + "/etc/sysconfig/mkinitrd") + + def _unmount_instroot(self): + try: + os.unlink(self._instroot + "/etc/sysconfig/mkinitrd") + except: + pass + LoopImageCreator._unmount_instroot(self) + + def __ensure_isodir(self): + if self.__isodir is None: + self.__isodir = self._mkdtemp("iso-") + return self.__isodir + + def _get_isodir(self): + return self.__ensure_isodir() + + def _set_isodir(self, isodir = None): + self.__isodir = isodir + + def _create_bootconfig(self): + """Configure the image so that it's bootable.""" + self._configure_bootloader(self.__ensure_isodir()) + + def _get_post_scripts_env(self, in_chroot): + env = LoopImageCreator._get_post_scripts_env(self, in_chroot) + + if not in_chroot: + env["LIVE_ROOT"] = self.__ensure_isodir() + + return env + + def __write_initrd_conf(self, path): + content = "" + if not os.path.exists(os.path.dirname(path)): + makedirs(os.path.dirname(path)) + f = open(path, "w") + + content += 'LIVEOS="yes"\n' + content += 'PROBE="no"\n' + content += 'MODULES+="squashfs ext3 ext2 vfat msdos "\n' + content += 'MODULES+="sr_mod sd_mod ide-cd cdrom "\n' + + for module in self.__modules: + if module == "=usb": + content += 'MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n' + content += 'MODULES+="usb_storage usbhid "\n' + elif module == "=firewire": + content += 'MODULES+="firewire-sbp2 firewire-ohci "\n' + content += 'MODULES+="sbp2 ohci1394 ieee1394 "\n' + elif module == "=mmc": + content += 'MODULES+="mmc_block sdhci sdhci-pci "\n' + elif module == "=pcmcia": + content += 'MODULES+="pata_pcmcia "\n' + else: + content += 'MODULES+="' + module + ' "\n' + f.write(content) + f.close() + + def __create_iso(self, isodir): + iso 
= self._outdir + "/" + self.name + ".iso" + genisoimage = find_binary_path("genisoimage") + args = [genisoimage, + "-J", "-r", + "-hide-rr-moved", "-hide-joliet-trans-tbl", + "-V", self.fslabel, + "-o", iso] + + args.extend(self._get_mkisofs_options(isodir)) + + args.append(isodir) + + if subprocess.call(args) != 0: + raise CreatorError("ISO creation failed!") + + """ It should be ok still even if you haven't isohybrid """ + isohybrid = None + try: + isohybrid = find_binary_path("isohybrid") + except: + pass + + if isohybrid: + args = [isohybrid, "-partok", iso ] + if subprocess.call(args) != 0: + raise CreatorError("Hybrid ISO creation failed!") + + self.__implant_md5sum(iso) + + def __implant_md5sum(self, iso): + """Implant an isomd5sum.""" + if os.path.exists("/usr/bin/implantisomd5"): + implantisomd5 = "/usr/bin/implantisomd5" + elif os.path.exists("/usr/lib/anaconda-runtime/implantisomd5"): + implantisomd5 = "/usr/lib/anaconda-runtime/implantisomd5" + else: + logging.warn("isomd5sum not installed; not setting up mediacheck") + implantisomd5 = "" + return + + subprocess.call([implantisomd5, iso], stdout=sys.stdout, stderr=sys.stderr) + + def _stage_final_image(self): + try: + makedirs(self.__ensure_isodir() + "/LiveOS") + + minimal_size = self._resparse() + + if not self.skip_minimize: + create_image_minimizer(self.__isodir + "/LiveOS/osmin.img", + self._image, minimal_size) + + if self.skip_compression: + shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img") + else: + makedirs(os.path.join(os.path.dirname(self._image), "LiveOS")) + shutil.move(self._image, + os.path.join(os.path.dirname(self._image), + "LiveOS", "ext3fs.img")) + mksquashfs(os.path.dirname(self._image), + self.__isodir + "/LiveOS/squashfs.img") + + self.__create_iso(self.__isodir) + finally: + shutil.rmtree(self.__isodir, ignore_errors = True) + self.__isodir = None + diff --git a/micng/imager/__init__.py b/micng/imager/__init__.py new file mode 100644 index 0000000..9f9e0e3 --- 
/dev/null +++ b/micng/imager/__init__.py @@ -0,0 +1,2 @@ +import BaseImageCreator +import livecd diff --git a/micng/imager/fs.py b/micng/imager/fs.py new file mode 100644 index 0000000..b46c594 --- /dev/null +++ b/micng/imager/fs.py @@ -0,0 +1,76 @@ +# +# creator.py : ImageCreator and LoopImageCreator base classes +# +# Copyright 2007, Red Hat Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +import os +import os.path +import stat +import sys +import tempfile +import shutil +import logging +import subprocess +import re +import tarfile +import glob + +import rpm + +from micng.utils.errors import * +from micng.utils.fs_related import * +from micng.utils import kickstart +from micng.utils import pkgmanagers +from micng.utils.rpmmisc import * +from micng.utils.misc import * +from BaseImageCreator import ImageCreator + + +class FsImageCreator(ImageCreator): + def __init__(self, ks, name): + """Initialize a LoopImageCreator instance. 
+ + This method takes the same arguments as ImageCreator.__init__() + """ + ImageCreator.__init__(self, ks, name) + + self._fstype = None + self._fsopts = None + + def _stage_final_image(self): + """ nothing to do """ + pass + + def package(self, destdir = "."): + self._stage_final_image() + + destdir = os.path.abspath(os.path.expanduser(destdir)) + if self._recording_pkgs: + self._save_recording_pkgs(destdir) + + print "Copying %s to %s, please be patient to wait (it is slow if they are on different file systems/partitons/disks)" \ + % (self._instroot, destdir + "/" + self.name) + + copycmd = find_binary_path("cp") + args = [ copycmd, "-af", self._instroot, destdir + "/" + self.name ] + subprocess.call(args) + + ignores = ["/dev/fd", "/dev/stdin", "/dev/stdout", "/dev/stderr", "/etc/mtab"] + for exclude in ignores: + if os.path.exists(destdir + "/" + self.name + exclude): + os.unlink(destdir + "/" + self.name + exclude) + + self.outimage.append(destdir + "/" + self.name) diff --git a/micng/imager/livecd.py b/micng/imager/livecd.py new file mode 100644 index 0000000..7fb754d --- /dev/null +++ b/micng/imager/livecd.py @@ -0,0 +1,407 @@ +# +#live.py : LiveImageCreator class for creating Live CD images +# +# Copyright 2007, Red Hat Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ +import os +import os.path +import glob +import shutil +import subprocess +import logging +import re +import time + +from micng.utils.errors import * +from micng.utils.fs_related import * +from micng.utils.rpmmisc import * +from BaseImageCreator import LiveImageCreatorBase + +class LivecdImageCreator(LiveImageCreatorBase): + """ImageCreator for x86 machines""" + + def _get_mkisofs_options(self, isodir): + return [ "-b", "isolinux/isolinux.bin", + "-c", "isolinux/boot.cat", + "-no-emul-boot", "-boot-info-table", + "-boot-load-size", "4" ] + + def _get_required_packages(self): + return ["syslinux", "syslinux-extlinux", "moblin-live"] + LiveImageCreatorBase._get_required_packages(self) + + def _get_isolinux_stanzas(self, isodir): + return "" + + def __find_syslinux_menu(self): + for menu in ["vesamenu.c32", "menu.c32"]: + if os.path.isfile(self._instroot + "/usr/share/syslinux/" + menu): + return menu + + raise CreatorError("syslinux not installed : " + "no suitable /usr/share/syslinux/*menu.c32 found") + + def __find_syslinux_mboot(self): + # + # We only need the mboot module if we have any xen hypervisors + # + if not glob.glob(self._instroot + "/boot/xen.gz*"): + return None + + return "mboot.c32" + + def __copy_syslinux_files(self, isodir, menu, mboot = None): + files = ["isolinux.bin", menu] + if mboot: + files += [mboot] + + for f in files: + path = self._instroot + "/usr/share/syslinux/" + f + + if not os.path.isfile(path): + raise CreatorError("syslinux not installed : " + "%s not found" % path) + + shutil.copy(path, isodir + "/isolinux/") + + def __copy_syslinux_background(self, isodest): + background_path = self._instroot + \ + "/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg" + + if not os.path.exists(background_path): + return False + + shutil.copyfile(background_path, isodest) + + return True + + def __copy_kernel_and_initramfs(self, isodir, version, index): + bootdir = self._instroot + "/boot" + + if self._alt_initrd_name: + src_initrd_path = 
os.path.join(bootdir, self._alt_initrd_name) + else: + src_initrd_path = os.path.join(bootdir, "initrd-" + version + ".img") + + try: + shutil.copyfile(bootdir + "/vmlinuz-" + version, + isodir + "/isolinux/vmlinuz" + index) + shutil.copyfile(src_initrd_path, + isodir + "/isolinux/initrd" + index + ".img") + except: + raise CreatorError("Unable to copy valid kernels or initrds, please check the repo") + + is_xen = False + if os.path.exists(bootdir + "/xen.gz-" + version[:-3]): + shutil.copyfile(bootdir + "/xen.gz-" + version[:-3], + isodir + "/isolinux/xen" + index + ".gz") + is_xen = True + + return is_xen + + def __is_default_kernel(self, kernel, kernels): + if len(kernels) == 1: + return True + + if kernel == self._default_kernel: + return True + + if kernel.startswith("kernel-") and kernel[7:] == self._default_kernel: + return True + + return False + + def __get_basic_syslinux_config(self, **args): + return """ +default %(menu)s +timeout %(timeout)d + +%(background)s +menu title Welcome to %(distroname)s! 
+menu color border 0 #ffffffff #00000000 +menu color sel 7 #ffffffff #ff000000 +menu color title 0 #ffffffff #00000000 +menu color tabmsg 0 #ffffffff #00000000 +menu color unsel 0 #ffffffff #00000000 +menu color hotsel 0 #ff000000 #ffffffff +menu color hotkey 7 #ffffffff #ff000000 +menu color timeout_msg 0 #ffffffff #00000000 +menu color timeout 0 #ffffffff #00000000 +menu color cmdline 0 #ffffffff #00000000 +""" % args + + def __get_image_stanza(self, is_xen, **args): + if not is_xen: + template = """label %(short)s + menu label %(long)s + kernel vmlinuz%(index)s + append initrd=initrd%(index)s.img root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s +""" + else: + template = """label %(short)s + menu label %(long)s + kernel mboot.c32 + append xen%(index)s.gz --- vmlinuz%(index)s root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s --- initrd%(index)s.img +""" + return template % args + + def __get_image_stanzas(self, isodir): + versions = [] + kernels = self._get_kernel_versions() + for kernel in kernels: + for version in kernels[kernel]: + versions.append(version) + + if not versions: + raise CreatorError("Unable to find valid kernels, please check the repo") + + kernel_options = self._get_kernel_options() + menu_options = self._get_menu_options() + + + cfg = "" + + default_version = None + default_index = None + index = "0" + for version in versions: + is_xen = self.__copy_kernel_and_initramfs(isodir, version, index) + + default = self.__is_default_kernel(kernel, kernels) + liveinst = False + autoliveinst = False + netinst = False + checkisomd5 = False + basicinst = False + + if menu_options.find("bootinstall") >= 0: + liveinst = True + + if menu_options.find("autoinst") >= 0: + autoliveinst = True + + if menu_options.find("verify") >= 0 and self._has_checkisomd5(): + checkisomd5 = True + + if menu_options.find("netinst") >= 0: + netinst = True + + if default: + long = "Boot %s" % self.distro_name + elif kernel.startswith("kernel-"): 
+ long = "Boot %s(%s)" % (self.name, kernel[7:]) + else: + long = "Boot %s(%s)" % (self.name, kernel) + + cfg += self.__get_image_stanza(is_xen, + fslabel = self.fslabel, + liveargs = kernel_options, + long = long, + short = "linux" + index, + extra = "", + index = index) + + if default: + cfg += "menu default\n" + default_version = version + default_index = index + if basicinst: + cfg += self.__get_image_stanza(is_xen, + fslabel = self.fslabel, + liveargs = kernel_options, + long = "Installation Only (Text based)", + short = "basic" + index, + extra = "basic nosplash 4", + index = index) + + if liveinst: + cfg += self.__get_image_stanza(is_xen, + fslabel = self.fslabel, + liveargs = kernel_options, + long = "Installation Only", + short = "liveinst" + index, + extra = "liveinst nosplash 4", + index = index) + if autoliveinst: + cfg += self.__get_image_stanza(is_xen, + fslabel = self.fslabel, + liveargs = kernel_options, + long = "Autoinstall (Deletes all existing content)", + short = "autoinst" + index, + extra = "autoinst nosplash 4", + index = index) + + if checkisomd5: + cfg += self.__get_image_stanza(is_xen, + fslabel = self.fslabel, + liveargs = kernel_options, + long = "Verify and " + long, + short = "check" + index, + extra = "check", + index = index) + + index = str(int(index) + 1) + + if not default_version: + default_version = versions[0] + if not default_index: + default_index = "0" + + + if netinst: + cfg += self.__get_image_stanza(is_xen, + fslabel = self.fslabel, + liveargs = kernel_options, + long = "Network Installation", + short = "netinst", + extra = "netinst 4", + index = default_index) + + return cfg + + def __get_memtest_stanza(self, isodir): + memtest = glob.glob(self._instroot + "/boot/memtest86*") + if not memtest: + return "" + + shutil.copyfile(memtest[0], isodir + "/isolinux/memtest") + + return """label memtest + menu label Memory Test + kernel memtest +""" + + def __get_local_stanza(self, isodir): + return """label local + menu label 
Boot from local drive + localboot 0xffff +""" + + def _configure_syslinux_bootloader(self, isodir): + """configure the boot loader""" + makedirs(isodir + "/isolinux") + + menu = self.__find_syslinux_menu() + + self.__copy_syslinux_files(isodir, menu, + self.__find_syslinux_mboot()) + + background = "" + if self.__copy_syslinux_background(isodir + "/isolinux/splash.jpg"): + background = "menu background splash.jpg" + + cfg = self.__get_basic_syslinux_config(menu = menu, + background = background, + name = self.name, + timeout = self._timeout * 10, + distroname = self.distro_name) + + cfg += self.__get_image_stanzas(isodir) + cfg += self.__get_memtest_stanza(isodir) + cfg += self.__get_local_stanza(isodir) + cfg += self._get_isolinux_stanzas(isodir) + + cfgf = open(isodir + "/isolinux/isolinux.cfg", "w") + cfgf.write(cfg) + cfgf.close() + + def __copy_efi_files(self, isodir): + if not os.path.exists(self._instroot + "/boot/efi/EFI/redhat/grub.efi"): + return False + shutil.copy(self._instroot + "/boot/efi/EFI/redhat/grub.efi", + isodir + "/EFI/boot/grub.efi") + shutil.copy(self._instroot + "/boot/grub/splash.xpm.gz", + isodir + "/EFI/boot/splash.xpm.gz") + + return True + + def __get_basic_efi_config(self, **args): + return """ +default=0 +splashimage=/EFI/boot/splash.xpm.gz +timeout %(timeout)d +hiddenmenu + +""" %args + + def __get_efi_image_stanza(self, **args): + return """title %(long)s + kernel /EFI/boot/vmlinuz%(index)s root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s + initrd /EFI/boot/initrd%(index)s.img +""" %args + + def __get_efi_image_stanzas(self, isodir, name): + # FIXME: this only supports one kernel right now... 
+ + kernel_options = self._get_kernel_options() + checkisomd5 = self._has_checkisomd5() + + cfg = "" + + for index in range(0, 9): + # we don't support xen kernels + if os.path.exists("%s/EFI/boot/xen%d.gz" %(isodir, index)): + continue + cfg += self.__get_efi_image_stanza(fslabel = self.fslabel, + liveargs = kernel_options, + long = name, + extra = "", index = index) + if checkisomd5: + cfg += self.__get_efi_image_stanza(fslabel = self.fslabel, + liveargs = kernel_options, + long = "Verify and Boot " + name, + extra = "check", + index = index) + break + + return cfg + + def _configure_efi_bootloader(self, isodir): + """Set up the configuration for an EFI bootloader""" + makedirs(isodir + "/EFI/boot") + + if not self.__copy_efi_files(isodir): + shutil.rmtree(isodir + "/EFI") + return + + for f in os.listdir(isodir + "/isolinux"): + os.link("%s/isolinux/%s" %(isodir, f), + "%s/EFI/boot/%s" %(isodir, f)) + + + cfg = self.__get_basic_efi_config(name = self.name, + timeout = self._timeout) + cfg += self.__get_efi_image_stanzas(isodir, self.name) + + cfgf = open(isodir + "/EFI/boot/grub.conf", "w") + cfgf.write(cfg) + cfgf.close() + + # first gen mactel machines get the bootloader name wrong apparently + if getBaseArch() == "i386": + os.link(isodir + "/EFI/boot/grub.efi", isodir + "/EFI/boot/boot.efi") + os.link(isodir + "/EFI/boot/grub.conf", isodir + "/EFI/boot/boot.conf") + + # for most things, we want them named boot$efiarch + efiarch = {"i386": "ia32", "x86_64": "x64"} + efiname = efiarch[getBaseArch()] + os.rename(isodir + "/EFI/boot/grub.efi", isodir + "/EFI/boot/boot%s.efi" %(efiname,)) + os.link(isodir + "/EFI/boot/grub.conf", isodir + "/EFI/boot/boot%s.conf" %(efiname,)) + + + def _configure_bootloader(self, isodir): + self._configure_syslinux_bootloader(isodir) + self._configure_efi_bootloader(isodir) + diff --git a/micng/pluginbase/__init__.py b/micng/pluginbase/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ 
b/micng/pluginbase/__init__.py diff --git a/micng/pluginbase/backend_plugin.py b/micng/pluginbase/backend_plugin.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/micng/pluginbase/backend_plugin.py diff --git a/micng/pluginbase/base_plugin.py b/micng/pluginbase/base_plugin.py new file mode 100644 index 0000000..36bd3b3 --- /dev/null +++ b/micng/pluginbase/base_plugin.py @@ -0,0 +1,5 @@ +#!/usr/bin/python +class PluginBase(object): + plugin_type = None + def __init__(self): + pass diff --git a/micng/pluginbase/hook_plugin.py b/micng/pluginbase/hook_plugin.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/micng/pluginbase/hook_plugin.py diff --git a/micng/pluginbase/imager_plugin.py b/micng/pluginbase/imager_plugin.py new file mode 100644 index 0000000..f587d71 --- /dev/null +++ b/micng/pluginbase/imager_plugin.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +from micng.pluginbase.base_plugin import PluginBase +import micng.configmgr as configmgr + +class ImagerPlugin(PluginBase): + plugin_type = "imager" + def __init__(self, configinfo=None): + if not configinfo: + self.configinfo = configmgr.getConfigInfo() + return + self.configinfo = configinfo + + def do_mount_instroot(self): + """Mount or prepare the install root directory. + + This is the interface where plugin may prepare the install root by e.g. + mounting creating and loopback mounting a filesystem image to + _instroot. + """ + pass + + def do_umount_instroot(self): + """Undo anything performed in do_mount_instroot(). + + This is the interface where plugin must undo anything which was done + in do_mount_instroot(). For example, if a filesystem image was mounted + onto _instroot, it should be unmounted here. + """ + pass + + def do_mount(self): + """Setup the target filesystem in preparation for an install. + + This interface should setup the filesystem which other functions will + install into and configure. + """ + pass + + def do_umount(self): + """Unmounts the target filesystem. 
+ + It should detache the system from the install root. + """ + pass + + def do_cleanup(self): + """Unmounts the target filesystem and deletes temporary files. + + This interface deletes any temporary files and directories that were created + on the host system while building the image. + """ + pass + + def do_install(self): + """Install packages into the install root. + + This interface installs the packages listed in the supplied kickstart + into the install root. By default, the packages are installed from the + repository URLs specified in the kickstart. + """ + pass + + def do_configure(self): + """Configure the system image according to the kickstart. + + This interface applies the (e.g. keyboard or network) configuration + specified in the kickstart and executes the kickstart %post scripts. + + If neccessary, it also prepares the image to be bootable by e.g. + creating an initrd and bootloader configuration. + """ + pass + + def do_package(self, destdir): + """Prepares the created image for final delivery. 
+ + This interface merely copies the install root to the supplied destination + directory, + """ + pass + + def do_create(self, args): + """ Temporary solution to create image in one single interface """ + pass + + def pack(self): + pass + + def unpack(self): + pass diff --git a/micng/pluginmgr.py b/micng/pluginmgr.py new file mode 100644 index 0000000..cecc0ed --- /dev/null +++ b/micng/pluginmgr.py @@ -0,0 +1,33 @@ +#!/usr/bin/python +import os +import sys +import micng.pluginbase.base_plugin as bp + +class PluginMgr(object): + def __init__(self, dirlist = []): + self.plugin_place = ["/usr/lib/micng/plugins"] + dirlist + self.plugins = {} + + def loadPlugins(self): + for pdir in map(os.path.abspath, self.plugin_place): + for pitem in os.walk(pdir): + sys.path.append(pitem[0]) + for pf in pitem[2]: + if not pf.endswith(".py"): + continue + + pmod = __import__(os.path.splitext(pf)[0]) + if hasattr(pmod, "mic_plugin"): + pname, pcls = pmod.mic_plugin + ptmp = (pname, pcls) + if hasattr(pcls, "plugin_type"): + if pcls.plugin_type not in self.plugins.keys(): + self.plugins[pcls.plugin_type] = [ptmp] + else: + self.plugins[pcls.plugin_type].append(ptmp) + + def getPluginByCateg(self, categ = None): + if categ is None: + return self.plugins + else: + return self.plugins[categ] diff --git a/micng/utils/__init__.py b/micng/utils/__init__.py new file mode 100644 index 0000000..49a7889 --- /dev/null +++ b/micng/utils/__init__.py @@ -0,0 +1,6 @@ +import misc +import cmdln +import kickstart +import errors +import fs_related +import argparse diff --git a/micng/utils/argparse.py b/micng/utils/argparse.py new file mode 100644 index 0000000..a69d294 --- /dev/null +++ b/micng/utils/argparse.py @@ -0,0 +1,2271 @@ +# -*- coding: utf-8 -*-
+
+# Copyright © 2006-2009 Steven J. Bethard <steven.bethard@gmail.com>.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy
+# of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Command-line parsing library
+
+This module is an optparse-inspired command-line parsing library that:
+
+ - handles both optional and positional arguments
+ - produces highly informative usage messages
+ - supports parsers that dispatch to sub-parsers
+
+The following is a simple usage example that sums integers from the
+command-line and writes the result to a file::
+
+ parser = argparse.ArgumentParser(
+ description='sum the integers at the command line')
+ parser.add_argument(
+ 'integers', metavar='int', nargs='+', type=int,
+ help='an integer to be summed')
+ parser.add_argument(
+ '--log', default=sys.stdout, type=argparse.FileType('w'),
+ help='the file where the sum should be written')
+ args = parser.parse_args()
+ args.log.write('%s' % sum(args.integers))
+ args.log.close()
+
+The module contains the following public classes:
+
+ - ArgumentParser -- The main entry point for command-line parsing. As the
+ example above shows, the add_argument() method is used to populate
+ the parser with actions for optional and positional arguments. Then
+ the parse_args() method is invoked to convert the args at the
+ command-line into an object with attributes.
+
+ - ArgumentError -- The exception raised by ArgumentParser objects when
+ there are errors with the parser's actions. Errors raised while
+ parsing the command-line are caught by ArgumentParser and emitted
+ as command-line messages.
+
+ - FileType -- A factory for defining types of files to be created. As the
+ example above shows, instances of FileType are typically passed as
+ the type= argument of add_argument() calls.
+
+ - Action -- The base class for parser actions. Typically actions are
+ selected by passing strings like 'store_true' or 'append_const' to
+ the action= argument of add_argument(). However, for greater
+ customization of ArgumentParser actions, subclasses of Action may
+ be defined and passed as the action= argument.
+
+ - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
+ ArgumentDefaultsHelpFormatter -- Formatter classes which
+ may be passed as the formatter_class= argument to the
+ ArgumentParser constructor. HelpFormatter is the default,
+ RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
+ not to change the formatting for help text, and
+ ArgumentDefaultsHelpFormatter adds information about argument defaults
+ to the help.
+
+All other classes in this module are considered implementation details.
+(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
+considered public as object names -- the API of the formatter objects is
+still considered an implementation detail.)
+"""
+
+__version__ = '1.0.1'
+__all__ = [
+ 'ArgumentParser',
+ 'ArgumentError',
+ 'Namespace',
+ 'Action',
+ 'FileType',
+ 'HelpFormatter',
+ 'RawDescriptionHelpFormatter',
+    'RawTextHelpFormatter',
+ 'ArgumentDefaultsHelpFormatter',
+]
+
+
+import copy as _copy
+import os as _os
+import re as _re
+import sys as _sys
+import textwrap as _textwrap
+
+from gettext import gettext as _
+
+try:
+ _set = set
+except NameError:
+ from sets import Set as _set
+
+try:
+ _basestring = basestring
+except NameError:
+ _basestring = str
+
+try:
+ _sorted = sorted
+except NameError:
+
+ def _sorted(iterable, reverse=False):
+ result = list(iterable)
+ result.sort()
+ if reverse:
+ result.reverse()
+ return result
+
+# silence Python 2.6 buggy warnings about Exception.message
+if _sys.version_info[:2] == (2, 6):
+ import warnings
+ warnings.filterwarnings(
+ action='ignore',
+ message='BaseException.message has been deprecated as of Python 2.6',
+ category=DeprecationWarning,
+ module='argparse')
+
+
+SUPPRESS = '==SUPPRESS=='
+
+OPTIONAL = '?'
+ZERO_OR_MORE = '*'
+ONE_OR_MORE = '+'
+PARSER = '==PARSER=='
+
+# =============================
+# Utility functions and classes
+# =============================
+
+class _AttributeHolder(object):
+ """Abstract base class that provides __repr__.
+
+ The __repr__ method returns a string in the format::
+ ClassName(attr=name, attr=name, ...)
+ The attributes are determined either by a class-level attribute,
+ '_kwarg_names', or by inspecting the instance __dict__.
+ """
+
+ def __repr__(self):
+ type_name = type(self).__name__
+ arg_strings = []
+ for arg in self._get_args():
+ arg_strings.append(repr(arg))
+ for name, value in self._get_kwargs():
+ arg_strings.append('%s=%r' % (name, value))
+ return '%s(%s)' % (type_name, ', '.join(arg_strings))
+
+ def _get_kwargs(self):
+ return _sorted(self.__dict__.items())
+
+ def _get_args(self):
+ return []
+
+
+def _ensure_value(namespace, name, value):
+ if getattr(namespace, name, None) is None:
+ setattr(namespace, name, value)
+ return getattr(namespace, name)
+
+
+# ===============
+# Formatting Help
+# ===============
+
+class HelpFormatter(object):
+ """Formatter for generating usage messages and argument help strings.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def __init__(self,
+ prog,
+ indent_increment=2,
+ max_help_position=24,
+ width=None):
+
+ # default setting for width
+ if width is None:
+ try:
+ width = int(_os.environ['COLUMNS'])
+ except (KeyError, ValueError):
+ width = 80
+ width -= 2
+
+ self._prog = prog
+ self._indent_increment = indent_increment
+ self._max_help_position = max_help_position
+ self._width = width
+
+ self._current_indent = 0
+ self._level = 0
+ self._action_max_length = 0
+
+ self._root_section = self._Section(self, None)
+ self._current_section = self._root_section
+
+ self._whitespace_matcher = _re.compile(r'\s+')
+ self._long_break_matcher = _re.compile(r'\n\n\n+')
+
+ # ===============================
+ # Section and indentation methods
+ # ===============================
+ def _indent(self):
+ self._current_indent += self._indent_increment
+ self._level += 1
+
+ def _dedent(self):
+ self._current_indent -= self._indent_increment
+ assert self._current_indent >= 0, 'Indent decreased below 0.'
+ self._level -= 1
+
+ class _Section(object):
+
+ def __init__(self, formatter, parent, heading=None):
+ self.formatter = formatter
+ self.parent = parent
+ self.heading = heading
+ self.items = []
+
+ def format_help(self):
+ # format the indented section
+ if self.parent is not None:
+ self.formatter._indent()
+ join = self.formatter._join_parts
+ for func, args in self.items:
+ func(*args)
+ item_help = join([func(*args) for func, args in self.items])
+ if self.parent is not None:
+ self.formatter._dedent()
+
+ # return nothing if the section was empty
+ if not item_help:
+ return ''
+
+ # add the heading if the section was non-empty
+ if self.heading is not SUPPRESS and self.heading is not None:
+ current_indent = self.formatter._current_indent
+ heading = '%*s%s:\n' % (current_indent, '', self.heading)
+ else:
+ heading = ''
+
+ # join the section-initial newline, the heading and the help
+ return join(['\n', heading, item_help, '\n'])
+
+ def _add_item(self, func, args):
+ self._current_section.items.append((func, args))
+
+ # ========================
+ # Message building methods
+ # ========================
+ def start_section(self, heading):
+ self._indent()
+ section = self._Section(self, self._current_section, heading)
+ self._add_item(section.format_help, [])
+ self._current_section = section
+
+ def end_section(self):
+ self._current_section = self._current_section.parent
+ self._dedent()
+
+ def add_text(self, text):
+ if text is not SUPPRESS and text is not None:
+ self._add_item(self._format_text, [text])
+
+ def add_usage(self, usage, actions, groups, prefix=None):
+ if usage is not SUPPRESS:
+ args = usage, actions, groups, prefix
+ self._add_item(self._format_usage, args)
+
+ def add_argument(self, action):
+ if action.help is not SUPPRESS:
+
+ # find all invocations
+ get_invocation = self._format_action_invocation
+ invocations = [get_invocation(action)]
+ for subaction in self._iter_indented_subactions(action):
+ invocations.append(get_invocation(subaction))
+
+ # update the maximum item length
+ invocation_length = max([len(s) for s in invocations])
+ action_length = invocation_length + self._current_indent
+ self._action_max_length = max(self._action_max_length,
+ action_length)
+
+ # add the item to the list
+ self._add_item(self._format_action, [action])
+
+ def add_arguments(self, actions):
+ for action in actions:
+ self.add_argument(action)
+
+ # =======================
+ # Help-formatting methods
+ # =======================
+ def format_help(self):
+ help = self._root_section.format_help()
+ if help:
+ help = self._long_break_matcher.sub('\n\n', help)
+ help = help.strip('\n') + '\n'
+ return help
+
+ def _join_parts(self, part_strings):
+ return ''.join([part
+ for part in part_strings
+ if part and part is not SUPPRESS])
+
+ def _format_usage(self, usage, actions, groups, prefix):
+ if prefix is None:
+ prefix = _('usage: ')
+
+ # if usage is specified, use that
+ if usage is not None:
+ usage = usage % dict(prog=self._prog)
+
+ # if no optionals or positionals are available, usage is just prog
+ elif usage is None and not actions:
+ usage = '%(prog)s' % dict(prog=self._prog)
+
+ # if optionals and positionals are available, calculate usage
+ elif usage is None:
+ prog = '%(prog)s' % dict(prog=self._prog)
+
+ # split optionals from positionals
+ optionals = []
+ positionals = []
+ for action in actions:
+ if action.option_strings:
+ optionals.append(action)
+ else:
+ positionals.append(action)
+
+ # build full usage string
+ format = self._format_actions_usage
+ action_usage = format(optionals + positionals, groups)
+ usage = ' '.join([s for s in [prog, action_usage] if s])
+
+ # wrap the usage parts if it's too long
+ text_width = self._width - self._current_indent
+ if len(prefix) + len(usage) > text_width:
+
+ # break usage into wrappable parts
+ part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+ opt_usage = format(optionals, groups)
+ pos_usage = format(positionals, groups)
+ opt_parts = _re.findall(part_regexp, opt_usage)
+ pos_parts = _re.findall(part_regexp, pos_usage)
+ assert ' '.join(opt_parts) == opt_usage
+ assert ' '.join(pos_parts) == pos_usage
+
+ # helper for wrapping lines
+ def get_lines(parts, indent, prefix=None):
+ lines = []
+ line = []
+ if prefix is not None:
+ line_len = len(prefix) - 1
+ else:
+ line_len = len(indent) - 1
+ for part in parts:
+ if line_len + 1 + len(part) > text_width:
+ lines.append(indent + ' '.join(line))
+ line = []
+ line_len = len(indent) - 1
+ line.append(part)
+ line_len += len(part) + 1
+ if line:
+ lines.append(indent + ' '.join(line))
+ if prefix is not None:
+ lines[0] = lines[0][len(indent):]
+ return lines
+
+ # if prog is short, follow it with optionals or positionals
+ if len(prefix) + len(prog) <= 0.75 * text_width:
+ indent = ' ' * (len(prefix) + len(prog) + 1)
+ if opt_parts:
+ lines = get_lines([prog] + opt_parts, indent, prefix)
+ lines.extend(get_lines(pos_parts, indent))
+ elif pos_parts:
+ lines = get_lines([prog] + pos_parts, indent, prefix)
+ else:
+ lines = [prog]
+
+ # if prog is long, put it on its own line
+ else:
+ indent = ' ' * len(prefix)
+ parts = opt_parts + pos_parts
+ lines = get_lines(parts, indent)
+ if len(lines) > 1:
+ lines = []
+ lines.extend(get_lines(opt_parts, indent))
+ lines.extend(get_lines(pos_parts, indent))
+ lines = [prog] + lines
+
+ # join lines into usage
+ usage = '\n'.join(lines)
+
+ # prefix with 'usage:'
+ return '%s%s\n\n' % (prefix, usage)
+
+ def _format_actions_usage(self, actions, groups):
+ # find group indices and identify actions in groups
+ group_actions = _set()
+ inserts = {}
+ for group in groups:
+ try:
+ start = actions.index(group._group_actions[0])
+ except ValueError:
+ continue
+ else:
+ end = start + len(group._group_actions)
+ if actions[start:end] == group._group_actions:
+ for action in group._group_actions:
+ group_actions.add(action)
+ if not group.required:
+ inserts[start] = '['
+ inserts[end] = ']'
+ else:
+ inserts[start] = '('
+ inserts[end] = ')'
+ for i in range(start + 1, end):
+ inserts[i] = '|'
+
+ # collect all actions format strings
+ parts = []
+ for i, action in enumerate(actions):
+
+ # suppressed arguments are marked with None
+ # remove | separators for suppressed arguments
+ if action.help is SUPPRESS:
+ parts.append(None)
+ if inserts.get(i) == '|':
+ inserts.pop(i)
+ elif inserts.get(i + 1) == '|':
+ inserts.pop(i + 1)
+
+ # produce all arg strings
+ elif not action.option_strings:
+ part = self._format_args(action, action.dest)
+
+ # if it's in a group, strip the outer []
+ if action in group_actions:
+ if part[0] == '[' and part[-1] == ']':
+ part = part[1:-1]
+
+ # add the action string to the list
+ parts.append(part)
+
+ # produce the first way to invoke the option in brackets
+ else:
+ option_string = action.option_strings[0]
+
+ # if the Optional doesn't take a value, format is:
+ # -s or --long
+ if action.nargs == 0:
+ part = '%s' % option_string
+
+ # if the Optional takes a value, format is:
+ # -s ARGS or --long ARGS
+ else:
+ default = action.dest.upper()
+ args_string = self._format_args(action, default)
+ part = '%s %s' % (option_string, args_string)
+
+ # make it look optional if it's not required or in a group
+ if not action.required and action not in group_actions:
+ part = '[%s]' % part
+
+ # add the action string to the list
+ parts.append(part)
+
+ # insert things at the necessary indices
+ for i in _sorted(inserts, reverse=True):
+ parts[i:i] = [inserts[i]]
+
+ # join all the action items with spaces
+ text = ' '.join([item for item in parts if item is not None])
+
+ # clean up separators for mutually exclusive groups
+ open = r'[\[(]'
+ close = r'[\])]'
+ text = _re.sub(r'(%s) ' % open, r'\1', text)
+ text = _re.sub(r' (%s)' % close, r'\1', text)
+ text = _re.sub(r'%s *%s' % (open, close), r'', text)
+ text = _re.sub(r'\(([^|]*)\)', r'\1', text)
+ text = text.strip()
+
+ # return the text
+ return text
+
+ def _format_text(self, text):
+ text_width = self._width - self._current_indent
+ indent = ' ' * self._current_indent
+ return self._fill_text(text, text_width, indent) + '\n\n'
+
+ def _format_action(self, action):
+ # determine the required width and the entry label
+ help_position = min(self._action_max_length + 2,
+ self._max_help_position)
+ help_width = self._width - help_position
+ action_width = help_position - self._current_indent - 2
+ action_header = self._format_action_invocation(action)
+
+        # no help; start on same line and add a final newline
+ if not action.help:
+ tup = self._current_indent, '', action_header
+ action_header = '%*s%s\n' % tup
+
+ # short action name; start on the same line and pad two spaces
+ elif len(action_header) <= action_width:
+ tup = self._current_indent, '', action_width, action_header
+ action_header = '%*s%-*s ' % tup
+ indent_first = 0
+
+ # long action name; start on the next line
+ else:
+ tup = self._current_indent, '', action_header
+ action_header = '%*s%s\n' % tup
+ indent_first = help_position
+
+ # collect the pieces of the action help
+ parts = [action_header]
+
+ # if there was help for the action, add lines of help text
+ if action.help:
+ help_text = self._expand_help(action)
+ help_lines = self._split_lines(help_text, help_width)
+ parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
+ for line in help_lines[1:]:
+ parts.append('%*s%s\n' % (help_position, '', line))
+
+ # or add a newline if the description doesn't end with one
+ elif not action_header.endswith('\n'):
+ parts.append('\n')
+
+ # if there are any sub-actions, add their help as well
+ for subaction in self._iter_indented_subactions(action):
+ parts.append(self._format_action(subaction))
+
+ # return a single string
+ return self._join_parts(parts)
+
+ def _format_action_invocation(self, action):
+ if not action.option_strings:
+ metavar, = self._metavar_formatter(action, action.dest)(1)
+ return metavar
+
+ else:
+ parts = []
+
+ # if the Optional doesn't take a value, format is:
+ # -s, --long
+ if action.nargs == 0:
+ parts.extend(action.option_strings)
+
+ # if the Optional takes a value, format is:
+ # -s ARGS, --long ARGS
+ else:
+ default = action.dest.upper()
+ args_string = self._format_args(action, default)
+ for option_string in action.option_strings:
+ parts.append('%s %s' % (option_string, args_string))
+
+ return ', '.join(parts)
+
+ def _metavar_formatter(self, action, default_metavar):
+ if action.metavar is not None:
+ result = action.metavar
+ elif action.choices is not None:
+ choice_strs = [str(choice) for choice in action.choices]
+ #result = '{%s}' % ','.join(choice_strs)
+ result = ""
+ else:
+ result = default_metavar
+
+ def format(tuple_size):
+ if isinstance(result, tuple):
+ return result
+ else:
+ return (result, ) * tuple_size
+ return format
+
+ def _format_args(self, action, default_metavar):
+ get_metavar = self._metavar_formatter(action, default_metavar)
+ if action.nargs is None:
+ result = '%s' % get_metavar(1)
+ elif action.nargs == OPTIONAL:
+ result = '[%s]' % get_metavar(1)
+ elif action.nargs == ZERO_OR_MORE:
+ result = '[%s [%s ...]]' % get_metavar(2)
+ elif action.nargs == ONE_OR_MORE:
+ result = '%s [%s ...]' % get_metavar(2)
+ elif action.nargs is PARSER:
+ result = '%s ...' % get_metavar(1)
+ else:
+ formats = ['%s' for _ in range(action.nargs)]
+ result = ' '.join(formats) % get_metavar(action.nargs)
+ return result
+
+ def _expand_help(self, action):
+ params = dict(vars(action), prog=self._prog)
+ for name in list(params):
+ if params[name] is SUPPRESS:
+ del params[name]
+ if params.get('choices') is not None:
+ choices_str = ', '.join([str(c) for c in params['choices']])
+ params['choices'] = choices_str
+ return self._get_help_string(action) % params
+
+ def _iter_indented_subactions(self, action):
+ try:
+ get_subactions = action._get_subactions
+ except AttributeError:
+ pass
+ else:
+ self._indent()
+ for subaction in get_subactions():
+ yield subaction
+ self._dedent()
+
+ def _split_lines(self, text, width):
+ text = self._whitespace_matcher.sub(' ', text).strip()
+ return _textwrap.wrap(text, width)
+
+ def _fill_text(self, text, width, indent):
+ text = self._whitespace_matcher.sub(' ', text).strip()
+ return _textwrap.fill(text, width, initial_indent=indent,
+ subsequent_indent=indent)
+
+ def _get_help_string(self, action):
+ return action.help
+
+
+class RawDescriptionHelpFormatter(HelpFormatter):
+ """Help message formatter which retains any formatting in descriptions.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def _fill_text(self, text, width, indent):
+ return ''.join([indent + line for line in text.splitlines(True)])
+
+
+class RawTextHelpFormatter(RawDescriptionHelpFormatter):
+ """Help message formatter which retains formatting of all help text.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def _split_lines(self, text, width):
+ return text.splitlines()
+
+
+class ArgumentDefaultsHelpFormatter(HelpFormatter):
+ """Help message formatter which adds default values to argument help.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def _get_help_string(self, action):
+ help = action.help
+ if '%(default)' not in action.help:
+ if action.default is not SUPPRESS:
+ defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
+ if action.option_strings or action.nargs in defaulting_nargs:
+ help += ' (default: %(default)s)'
+ return help
+
+
+# =====================
+# Options and Arguments
+# =====================
+
+def _get_action_name(argument):
+ if argument is None:
+ return None
+ elif argument.option_strings:
+ return '/'.join(argument.option_strings)
+ elif argument.metavar not in (None, SUPPRESS):
+ return argument.metavar
+ elif argument.dest not in (None, SUPPRESS):
+ return argument.dest
+ else:
+ return None
+
+
+class ArgumentError(Exception):
+ """An error from creating or using an argument (optional or positional).
+
+ The string value of this exception is the message, augmented with
+ information about the argument that caused it.
+ """
+
+ def __init__(self, argument, message):
+ self.argument_name = _get_action_name(argument)
+ self.message = message
+
+ def __str__(self):
+ if self.argument_name is None:
+ format = '%(message)s'
+ else:
+ format = 'argument %(argument_name)s: %(message)s'
+ return format % dict(message=self.message,
+ argument_name=self.argument_name)
+
+# ==============
+# Action classes
+# ==============
+
+class Action(_AttributeHolder):
+ """Information about how to convert command line strings to Python objects.
+
+ Action objects are used by an ArgumentParser to represent the information
+ needed to parse a single argument from one or more strings from the
+ command line. The keyword arguments to the Action constructor are also
+ all attributes of Action instances.
+
+ Keyword Arguments:
+
+ - option_strings -- A list of command-line option strings which
+ should be associated with this action.
+
+ - dest -- The name of the attribute to hold the created object(s)
+
+ - nargs -- The number of command-line arguments that should be
+ consumed. By default, one argument will be consumed and a single
+ value will be produced. Other values include:
+ - N (an integer) consumes N arguments (and produces a list)
+ - '?' consumes zero or one arguments
+ - '*' consumes zero or more arguments (and produces a list)
+ - '+' consumes one or more arguments (and produces a list)
+ Note that the difference between the default and nargs=1 is that
+ with the default, a single value will be produced, while with
+ nargs=1, a list containing a single value will be produced.
+
+ - const -- The value to be produced if the option is specified and the
+ option uses an action that takes no values.
+
+ - default -- The value to be produced if the option is not specified.
+
+ - type -- The type which the command-line arguments should be converted
+ to, should be one of 'string', 'int', 'float', 'complex' or a
+ callable object that accepts a single string argument. If None,
+ 'string' is assumed.
+
+ - choices -- A container of values that should be allowed. If not None,
+ after a command-line argument has been converted to the appropriate
+ type, an exception will be raised if it is not a member of this
+ collection.
+
+ - required -- True if the action must always be specified at the
+ command line. This is only meaningful for optional command-line
+ arguments.
+
+ - help -- The help string describing the argument.
+
+ - metavar -- The name to be used for the option's argument with the
+ help string. If None, the 'dest' value will be used as the name.
+ """
+
+ def __init__(self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None):
+ self.option_strings = option_strings
+ self.dest = dest
+ self.nargs = nargs
+ self.const = const
+ self.default = default
+ self.type = type
+ self.choices = choices
+ self.required = required
+ self.help = help
+ self.metavar = metavar
+
+ def _get_kwargs(self):
+ names = [
+ 'option_strings',
+ 'dest',
+ 'nargs',
+ 'const',
+ 'default',
+ 'type',
+ 'choices',
+ 'help',
+ 'metavar',
+ ]
+ return [(name, getattr(self, name)) for name in names]
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ raise NotImplementedError(_('.__call__() not defined'))
+
+
+class _StoreAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None):
+ if nargs == 0:
+ raise ValueError('nargs for store actions must be > 0; if you '
+ 'have nothing to store, actions such as store '
+ 'true or store const may be more appropriate')
+ if const is not None and nargs != OPTIONAL:
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+ super(_StoreAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ const=const,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, values)
+
+
+class _StoreConstAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ const,
+ default=None,
+ required=False,
+ help=None,
+ metavar=None):
+ super(_StoreConstAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=0,
+ const=const,
+ default=default,
+ required=required,
+ help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, self.const)
+
+
+class _StoreTrueAction(_StoreConstAction):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ default=False,
+ required=False,
+ help=None):
+ super(_StoreTrueAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ const=True,
+ default=default,
+ required=required,
+ help=help)
+
+
+class _StoreFalseAction(_StoreConstAction):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ default=True,
+ required=False,
+ help=None):
+ super(_StoreFalseAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ const=False,
+ default=default,
+ required=required,
+ help=help)
+
+
+class _AppendAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None):
+ if nargs == 0:
+ raise ValueError('nargs for append actions must be > 0; if arg '
+ 'strings are not supplying the value to append, '
+ 'the append const action may be more appropriate')
+ if const is not None and nargs != OPTIONAL:
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+ super(_AppendAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ const=const,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = _copy.copy(_ensure_value(namespace, self.dest, []))
+ items.append(values)
+ setattr(namespace, self.dest, items)
+
+
class _AppendConstAction(Action):
    """Append the action's ``const`` to a list at ``dest`` per occurrence."""

    def __init__(self, option_strings, dest, const, default=None,
                 required=False, help=None, metavar=None):
        Action.__init__(self,
                        option_strings=option_strings,
                        dest=dest,
                        nargs=0,
                        const=const,
                        default=default,
                        required=required,
                        help=help,
                        metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # copy first so a shared default list is never mutated in place
        collected = _copy.copy(_ensure_value(namespace, self.dest, []))
        collected.append(self.const)
        setattr(namespace, self.dest, collected)
+
+
class _CountAction(Action):
    """Count how many times the option occurs (e.g. ``-vvv`` -> 3)."""

    def __init__(self, option_strings, dest, default=None, required=False,
                 help=None):
        Action.__init__(self,
                        option_strings=option_strings,
                        dest=dest,
                        nargs=0,
                        default=default,
                        required=required,
                        help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        # missing attribute counts as zero, then bump by one
        setattr(namespace, self.dest,
                _ensure_value(namespace, self.dest, 0) + 1)
+
+
class _HelpAction(Action):
    """Print the parser's help message and exit the process."""

    def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS,
                 help=None):
        # dest/default are SUPPRESS so no attribute lands in the namespace
        Action.__init__(self,
                        option_strings=option_strings,
                        dest=dest,
                        default=default,
                        nargs=0,
                        help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        parser.exit()
+
+
class _VersionAction(Action):
    """Print the parser's version message and exit the process."""

    def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS,
                 help=None):
        # dest/default are SUPPRESS so no attribute lands in the namespace
        Action.__init__(self,
                        option_strings=option_strings,
                        dest=dest,
                        default=default,
                        nargs=0,
                        help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_version()
        parser.exit()
+
+
class _SubParsersAction(Action):
    """Action that dispatches the remaining command line to a sub-parser.

    Sub-commands are registered with add_parser(); when invoked, the
    first positional value names the sub-parser and the rest of the
    argument strings are parsed by it into the same namespace.
    """

    class _ChoicesPseudoAction(Action):
        # Argument-less stand-in whose only purpose is to carry a
        # sub-command's name and help text for the help formatter
        # (exposed via _get_subactions below).

        def __init__(self, name, help):
            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
            sup.__init__(option_strings=[], dest=name, help=help)

    def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):

        self._prog_prefix = prog
        self._parser_class = parser_class
        # name -> ArgumentParser; doubles as the choices mapping below,
        # so parsers added later are automatically valid choices
        self._name_parser_map = {}
        self._choices_actions = []

        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=PARSER,
            choices=self._name_parser_map,
            help=help,
            metavar=metavar)

    def add_parser(self, name, **kwargs):
        """Create, register and return a sub-parser for *name*."""
        # set prog from the existing prefix
        if kwargs.get('prog') is None:
            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)

        # create a pseudo-action to hold the choice help
        if 'help' in kwargs:
            help = kwargs.pop('help')
            choice_action = self._ChoicesPseudoAction(name, help)
            self._choices_actions.append(choice_action)

        # create the parser and add it to the map
        parser = self._parser_class(**kwargs)
        self._name_parser_map[name] = parser
        return parser

    def _get_subactions(self):
        # consumed by the help formatter to list the sub-commands
        return self._choices_actions

    def __call__(self, parser, namespace, values, option_string=None):
        parser_name = values[0]
        arg_strings = values[1:]

        # set the parser name if requested
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, parser_name)

        # select the parser
        try:
            parser = self._name_parser_map[parser_name]
        except KeyError:
            tup = parser_name, ', '.join(self._name_parser_map)
            msg = _('unknown parser %r (choices: %s)' % tup)
            raise ArgumentError(self, msg)

        # parse all the remaining options into the namespace
        parser.parse_args(arg_strings, namespace)
+
+
+# ==============
+# Type classes
+# ==============
+
class FileType(object):
    """Factory producing open file objects, for use as a type= argument.

    Instances are typically passed as the ``type=`` keyword of
    ArgumentParser.add_argument().

    Keyword Arguments:
        - mode -- how the file should be opened; same values as the
          builtin open() function.
        - bufsize -- desired buffer size; same values as the builtin
          open() function.
    """

    def __init__(self, mode='r', bufsize=None):
        self._mode = mode
        self._bufsize = bufsize

    def __call__(self, string):
        # the special name "-" maps to the standard streams
        if string == '-':
            if 'r' in self._mode:
                return _sys.stdin
            if 'w' in self._mode:
                return _sys.stdout
            raise ValueError(_('argument "-" with mode %r' % self._mode))

        # every other string is treated as a file name
        if self._bufsize:
            return open(string, self._mode, self._bufsize)
        return open(string, self._mode)

    def __repr__(self):
        shown = [repr(value)
                 for value in (self._mode, self._bufsize)
                 if value is not None]
        return '%s(%s)' % (type(self).__name__, ', '.join(shown))
+
+# ===========================
+# Optional and Positional Parsing
+# ===========================
+
class Namespace(_AttributeHolder):
    """Plain attribute bag returned by parse_args().

    Equality compares the complete attribute dictionaries; the readable
    string form comes from the _AttributeHolder base class.
    """

    def __init__(self, **kwargs):
        for key in kwargs:
            setattr(self, key, kwargs[key])

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not (self == other)
+
+
class _ActionsContainer(object):
    """Shared machinery for objects that hold argument actions.

    Both ArgumentParser and _ArgumentGroup derive from this class; it
    owns the action/type registries, the Action list, argument groups,
    parser-level defaults, and the option-string conflict handling used
    by add_argument().
    """

    def __init__(self,
                 description,
                 prefix_chars,
                 argument_default,
                 conflict_handler):
        super(_ActionsContainer, self).__init__()

        self.description = description
        self.argument_default = argument_default
        self.prefix_chars = prefix_chars
        self.conflict_handler = conflict_handler

        # set up registries
        self._registries = {}

        # register actions
        self.register('action', None, _StoreAction)
        self.register('action', 'store', _StoreAction)
        self.register('action', 'store_const', _StoreConstAction)
        self.register('action', 'store_true', _StoreTrueAction)
        self.register('action', 'store_false', _StoreFalseAction)
        self.register('action', 'append', _AppendAction)
        self.register('action', 'append_const', _AppendConstAction)
        self.register('action', 'count', _CountAction)
        self.register('action', 'help', _HelpAction)
        self.register('action', 'version', _VersionAction)
        self.register('action', 'parsers', _SubParsersAction)

        # raise an exception if the conflict handler is invalid
        self._get_handler()

        # action storage
        self._actions = []
        self._option_string_actions = {}

        # groups
        self._action_groups = []
        self._mutually_exclusive_groups = []

        # defaults storage
        self._defaults = {}

        # determines whether an "option" looks like a negative number.
        # FIX: the previous pattern r'^-\d+|-\d*.\d+$' left the first
        # alternative unanchored at the end and used an unescaped '.',
        # so strings such as '-1x' or '-1x5' were misclassified as
        # negative numbers; both alternatives are now fully anchored
        # and the dot is literal (matches the upstream argparse fix).
        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')

        # whether or not there are any optionals that look like negative
        # numbers -- uses a list so it can be shared and edited
        self._has_negative_number_optionals = []

    # ====================
    # Registration methods
    # ====================
    def register(self, registry_name, value, object):
        """Register *object* under *value* in the named registry."""
        registry = self._registries.setdefault(registry_name, {})
        registry[value] = object

    def _registry_get(self, registry_name, value, default=None):
        return self._registries[registry_name].get(value, default)

    # ==================================
    # Namespace default settings methods
    # ==================================
    def set_defaults(self, **kwargs):
        """Set parser-level defaults, overriding existing action defaults."""
        self._defaults.update(kwargs)

        # if these defaults match any existing arguments, replace
        # the previous default on the object with the new one
        for action in self._actions:
            if action.dest in kwargs:
                action.default = kwargs[action.dest]

    # =======================
    # Adding argument actions
    # =======================
    def add_argument(self, *args, **kwargs):
        """
        add_argument(dest, ..., name=value, ...)
        add_argument(option_string, option_string, ..., name=value, ...)
        """

        # if no positional args are supplied or only one is supplied and
        # it doesn't look like an option string, parse a positional
        # argument
        chars = self.prefix_chars
        if not args or len(args) == 1 and args[0][0] not in chars:
            kwargs = self._get_positional_kwargs(*args, **kwargs)

        # otherwise, we're adding an optional argument
        else:
            kwargs = self._get_optional_kwargs(*args, **kwargs)

        # if no default was supplied, use the parser-level default
        if 'default' not in kwargs:
            dest = kwargs['dest']
            if dest in self._defaults:
                kwargs['default'] = self._defaults[dest]
            elif self.argument_default is not None:
                kwargs['default'] = self.argument_default

        # create the action object, and add it to the parser
        action_class = self._pop_action_class(kwargs)
        action = action_class(**kwargs)
        return self._add_action(action)

    def add_argument_group(self, *args, **kwargs):
        """Create an _ArgumentGroup attached to this container."""
        group = _ArgumentGroup(self, *args, **kwargs)
        self._action_groups.append(group)
        return group

    def add_mutually_exclusive_group(self, **kwargs):
        """Create a _MutuallyExclusiveGroup attached to this container."""
        group = _MutuallyExclusiveGroup(self, **kwargs)
        self._mutually_exclusive_groups.append(group)
        return group

    def _add_action(self, action):
        """Register *action*, resolving option-string conflicts first."""
        # resolve any conflicts
        self._check_conflict(action)

        # add to actions list
        self._actions.append(action)
        action.container = self

        # index the action by any option strings it has
        for option_string in action.option_strings:
            self._option_string_actions[option_string] = action

        # set the flag if any option strings look like negative numbers
        for option_string in action.option_strings:
            if self._negative_number_matcher.match(option_string):
                if not self._has_negative_number_optionals:
                    self._has_negative_number_optionals.append(True)

        # return the created action
        return action

    def _remove_action(self, action):
        self._actions.remove(action)

    def _add_container_actions(self, container):
        """Copy *container*'s actions into this one, preserving groups."""
        # collect groups by titles
        title_group_map = {}
        for group in self._action_groups:
            if group.title in title_group_map:
                msg = _('cannot merge actions - two groups are named %r')
                raise ValueError(msg % (group.title))
            title_group_map[group.title] = group

        # map each action to its group
        group_map = {}
        for group in container._action_groups:

            # if a group with the title exists, use that, otherwise
            # create a new group matching the container's group
            if group.title not in title_group_map:
                title_group_map[group.title] = self.add_argument_group(
                    title=group.title,
                    description=group.description,
                    conflict_handler=group.conflict_handler)

            # map the actions to their new group
            for action in group._group_actions:
                group_map[action] = title_group_map[group.title]

        # add container's mutually exclusive groups
        # NOTE: if add_mutually_exclusive_group ever gains title= and
        # description= then this code will need to be expanded as above
        for group in container._mutually_exclusive_groups:
            mutex_group = self.add_mutually_exclusive_group(
                required=group.required)

            # map the actions to their new mutex group
            for action in group._group_actions:
                group_map[action] = mutex_group

        # add all actions to this container or their group
        for action in container._actions:
            group_map.get(action, self)._add_action(action)

    def _get_positional_kwargs(self, dest, **kwargs):
        """Normalize add_argument() keywords for a positional argument."""
        # make sure required is not specified
        if 'required' in kwargs:
            msg = _("'required' is an invalid argument for positionals")
            raise TypeError(msg)

        # mark positional arguments as required if at least one is
        # always required
        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
            kwargs['required'] = True
        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
            kwargs['required'] = True

        # return the keyword arguments with no option strings
        return dict(kwargs, dest=dest, option_strings=[])

    def _get_optional_kwargs(self, *args, **kwargs):
        """Normalize add_argument() keywords for an optional argument."""
        # determine short and long option strings
        option_strings = []
        long_option_strings = []
        for option_string in args:
            # error on one-or-fewer-character option strings
            if len(option_string) < 2:
                msg = _('invalid option string %r: '
                        'must be at least two characters long')
                raise ValueError(msg % option_string)

            # error on strings that don't start with an appropriate prefix
            if not option_string[0] in self.prefix_chars:
                msg = _('invalid option string %r: '
                        'must start with a character %r')
                tup = option_string, self.prefix_chars
                raise ValueError(msg % tup)

            # error on strings that are all prefix characters
            if not (_set(option_string) - _set(self.prefix_chars)):
                msg = _('invalid option string %r: '
                        'must contain characters other than %r')
                tup = option_string, self.prefix_chars
                raise ValueError(msg % tup)

            # strings starting with two prefix characters are long options
            option_strings.append(option_string)
            if option_string[0] in self.prefix_chars:
                if option_string[1] in self.prefix_chars:
                    long_option_strings.append(option_string)

        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
        dest = kwargs.pop('dest', None)
        if dest is None:
            if long_option_strings:
                dest_option_string = long_option_strings[0]
            else:
                dest_option_string = option_strings[0]
            dest = dest_option_string.lstrip(self.prefix_chars)
            dest = dest.replace('-', '_')

        # return the updated keyword arguments
        return dict(kwargs, dest=dest, option_strings=option_strings)

    def _pop_action_class(self, kwargs, default=None):
        """Pop 'action' from kwargs and resolve it via the registry."""
        action = kwargs.pop('action', default)
        return self._registry_get('action', action, action)

    def _get_handler(self):
        """Return the conflict-handler method named by self.conflict_handler."""
        # determine function from conflict handler string
        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
        try:
            return getattr(self, handler_func_name)
        except AttributeError:
            msg = _('invalid conflict_resolution value: %r')
            raise ValueError(msg % self.conflict_handler)

    def _check_conflict(self, action):
        """Detect option-string clashes and delegate to the handler."""

        # find all options that conflict with this option
        confl_optionals = []
        for option_string in action.option_strings:
            if option_string in self._option_string_actions:
                confl_optional = self._option_string_actions[option_string]
                confl_optionals.append((option_string, confl_optional))

        # resolve any conflicts
        if confl_optionals:
            conflict_handler = self._get_handler()
            conflict_handler(action, confl_optionals)

    def _handle_conflict_error(self, action, conflicting_actions):
        # 'error' strategy: refuse to redefine an existing option string
        message = _('conflicting option string(s): %s')
        conflict_string = ', '.join([option_string
                                     for option_string, action
                                     in conflicting_actions])
        raise ArgumentError(action, message % conflict_string)

    def _handle_conflict_resolve(self, action, conflicting_actions):
        # 'resolve' strategy: the new option silently displaces the old one

        # remove all conflicting options
        for option_string, action in conflicting_actions:

            # remove the conflicting option
            action.option_strings.remove(option_string)
            self._option_string_actions.pop(option_string, None)

            # if the option now has no option string, remove it from the
            # container holding it
            if not action.option_strings:
                action.container._remove_action(action)
+
+
class _ArgumentGroup(_ActionsContainer):
    """A titled sub-grouping of arguments, used mainly for help output."""

    def __init__(self, container, title=None, description=None, **kwargs):
        # fall back to the parent container for any unspecified settings
        kwargs.setdefault('conflict_handler', container.conflict_handler)
        kwargs.setdefault('prefix_chars', container.prefix_chars)
        kwargs.setdefault('argument_default', container.argument_default)
        super(_ArgumentGroup, self).__init__(description=description,
                                             **kwargs)

        # state owned by the group itself
        self.title = title
        self._group_actions = []

        # all remaining state is aliased to the container, so actions
        # registered through the group are visible to the parser as well
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals

    def _add_action(self, action):
        # register with the shared container state, then track locally
        action = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)
+
+
class _MutuallyExclusiveGroup(_ArgumentGroup):
    """Group whose options may not be combined on a single command line."""

    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container

    def _add_action(self, action):
        # a required action would defeat the "at most one" semantics
        if action.required:
            raise ValueError(_('mutually exclusive arguments must be optional'))
        # register on the real container, but remember group membership
        action = self._container._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)
+
+
+class ArgumentParser(_AttributeHolder, _ActionsContainer):
+ """Object for parsing command line strings into Python objects.
+
+ Keyword Arguments:
+ - prog -- The name of the program (default: sys.argv[0])
+ - usage -- A usage message (default: auto-generated from arguments)
+ - description -- A description of what the program does
+ - epilog -- Text following the argument descriptions
+ - version -- Add a -v/--version option with the given version string
+ - parents -- Parsers whose arguments should be copied into this one
+ - formatter_class -- HelpFormatter class for printing help messages
+ - prefix_chars -- Characters that prefix optional arguments
+ - fromfile_prefix_chars -- Characters that prefix files containing
+ additional arguments
+ - argument_default -- The default value for all arguments
+ - conflict_handler -- String indicating how to handle conflicts
    - add_help -- Add a -h/--help option
+ """
+
    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=[],
                 formatter_class=HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True):
        # NOTE: the mutable default parents=[] is safe here -- it is only
        # iterated below, never mutated.

        superinit = super(ArgumentParser, self).__init__
        superinit(description=description,
                  prefix_chars=prefix_chars,
                  argument_default=argument_default,
                  conflict_handler=conflict_handler)

        # default setting for prog
        if prog is None:
            prog = _os.path.basename(_sys.argv[0])

        self.prog = prog
        self.usage = usage
        self.epilog = epilog
        self.version = version
        self.formatter_class = formatter_class
        self.fromfile_prefix_chars = fromfile_prefix_chars
        self.add_help = add_help

        add_group = self.add_argument_group
        self._positionals = add_group(_('arguments'))
        self._optionals = add_group(_('options'))
        self._subparsers = None

        # register types
        # the default (None) type conversion is a pass-through
        def identity(string):
            return string
        self.register('type', None, identity)

        # add help and version arguments if necessary
        # (using explicit default to override global argument_default)
        # -v/--version is only added when a version string was supplied
        if self.add_help:
            self.add_argument(
                '-h', '--help', action='help', default=SUPPRESS,
                help=_('show this help message and exit'))
        if self.version:
            self.add_argument(
                '-v', '--version', action='version', default=SUPPRESS,
                help=_("show program's version number and exit"))

        # add parent arguments and defaults
        for parent in parents:
            self._add_container_actions(parent)
            try:
                defaults = parent._defaults
            except AttributeError:
                pass
            else:
                self._defaults.update(defaults)
+
+ # =======================
+ # Pretty __repr__ methods
+ # =======================
+ def _get_kwargs(self):
+ names = [
+ 'prog',
+ 'usage',
+ 'description',
+ 'version',
+ 'formatter_class',
+ 'conflict_handler',
+ 'add_help',
+ ]
+ return [(name, getattr(self, name)) for name in names]
+
+ # ==================================
+ # Optional/Positional adding methods
+ # ==================================
    def add_subparsers(self, **kwargs):
        """Create and return the _SubParsersAction used for sub-commands.

        Only one subparsers argument is allowed per parser; a second
        call raises via self.error().
        """
        if self._subparsers is not None:
            self.error(_('cannot have multiple subparser arguments'))

        # add the parser class to the arguments if it's not present
        kwargs.setdefault('parser_class', type(self))

        # with a title/description the sub-commands get their own help
        # group; otherwise they are listed among the positionals
        if 'title' in kwargs or 'description' in kwargs:
            title = _(kwargs.pop('title', 'subcommands'))
            description = _(kwargs.pop('description', None))
            self._subparsers = self.add_argument_group(title, description)
        else:
            self._subparsers = self._positionals

        # prog defaults to the usage message of this parser, skipping
        # optional arguments and with no "usage:" prefix
        if kwargs.get('prog') is None:
            formatter = self._get_formatter()
            positionals = self._get_positional_actions()
            groups = self._mutually_exclusive_groups
            formatter.add_usage(self.usage, positionals, groups, '')
            kwargs['prog'] = formatter.format_help().strip()

        # create the parsers action and add it to the positionals list
        parsers_class = self._pop_action_class(kwargs, 'parsers')
        action = parsers_class(option_strings=[], **kwargs)
        self._subparsers._add_action(action)

        # return the created parsers action
        return action
+
+ def _add_action(self, action):
+ if action.option_strings:
+ self._optionals._add_action(action)
+ else:
+ self._positionals._add_action(action)
+ return action
+
+ def _get_optional_actions(self):
+ return [action
+ for action in self._actions
+ if action.option_strings]
+
+ def _get_positional_actions(self):
+ return [action
+ for action in self._actions
+ if not action.option_strings]
+
+ # =====================================
+ # Command line argument parsing methods
+ # =====================================
+ def parse_args(self, args=None, namespace=None):
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ msg = _('unrecognized arguments: %s')
+ self.error(msg % ' '.join(argv))
+ return args
+
    def parse_known_args(self, args=None, namespace=None):
        """Parse known arguments, returning (namespace, extra_strings).

        Unlike parse_args(), leftover argument strings are returned
        rather than treated as an error.
        """
        # args default to the system args
        if args is None:
            args = _sys.argv[1:]

        # default Namespace built from parser defaults
        if namespace is None:
            namespace = Namespace()

        # add any action defaults that aren't present
        # (string defaults are run through the action's type conversion)
        for action in self._actions:
            if action.dest is not SUPPRESS:
                if not hasattr(namespace, action.dest):
                    if action.default is not SUPPRESS:
                        default = action.default
                        if isinstance(action.default, _basestring):
                            default = self._get_value(action, default)
                        setattr(namespace, action.dest, default)

        # add any parser defaults that aren't present
        for dest in self._defaults:
            if not hasattr(namespace, dest):
                setattr(namespace, dest, self._defaults[dest])

        # parse the arguments and exit if there are any errors
        try:
            return self._parse_known_args(args, namespace)
        except ArgumentError:
            err = _sys.exc_info()[1]
            self.error(str(err))
+
    def _parse_known_args(self, arg_strings, namespace):
        """Core parsing loop: apply actions and collect extra strings.

        Builds a pattern string of 'O' (option), 'A' (argument) and '-'
        ('--' separator) characters, then alternately consumes
        positionals and optionals until the strings are exhausted.
        Returns (namespace, extras).
        """
        # replace arg strings that are file references
        if self.fromfile_prefix_chars is not None:
            arg_strings = self._read_args_from_files(arg_strings)

        # map all mutually exclusive arguments to the other arguments
        # they can't occur with
        action_conflicts = {}
        for mutex_group in self._mutually_exclusive_groups:
            group_actions = mutex_group._group_actions
            for i, mutex_action in enumerate(mutex_group._group_actions):
                conflicts = action_conflicts.setdefault(mutex_action, [])
                conflicts.extend(group_actions[:i])
                conflicts.extend(group_actions[i + 1:])

        # find all option indices, and determine the arg_string_pattern
        # which has an 'O' if there is an option at an index,
        # an 'A' if there is an argument, or a '-' if there is a '--'
        option_string_indices = {}
        arg_string_pattern_parts = []
        arg_strings_iter = iter(arg_strings)
        for i, arg_string in enumerate(arg_strings_iter):

            # all args after -- are non-options
            if arg_string == '--':
                arg_string_pattern_parts.append('-')
                for arg_string in arg_strings_iter:
                    arg_string_pattern_parts.append('A')

            # otherwise, add the arg to the arg strings
            # and note the index if it was an option
            else:
                option_tuple = self._parse_optional(arg_string)
                if option_tuple is None:
                    pattern = 'A'
                else:
                    option_string_indices[i] = option_tuple
                    pattern = 'O'
                arg_string_pattern_parts.append(pattern)

        # join the pieces together to form the pattern
        arg_strings_pattern = ''.join(arg_string_pattern_parts)

        # converts arg strings to the appropriate and then takes the action
        seen_actions = _set()
        seen_non_default_actions = _set()

        def take_action(action, argument_strings, option_string=None):
            # convert the strings and invoke the action, enforcing
            # mutual-exclusion constraints first
            seen_actions.add(action)
            argument_values = self._get_values(action, argument_strings)

            # error if this argument is not allowed with other previously
            # seen arguments, assuming that actions that use the default
            # value don't really count as "present"
            if argument_values is not action.default:
                seen_non_default_actions.add(action)
                for conflict_action in action_conflicts.get(action, []):
                    if conflict_action in seen_non_default_actions:
                        msg = _('not allowed with argument %s')
                        action_name = _get_action_name(conflict_action)
                        raise ArgumentError(action, msg % action_name)

            # take the action if we didn't receive a SUPPRESS value
            # (e.g. from a default)
            if argument_values is not SUPPRESS:
                action(self, namespace, argument_values, option_string)

        # function to convert arg_strings into an optional action
        def consume_optional(start_index):
            # returns the index just past this optional's consumed strings

            # get the optional identified at this index
            option_tuple = option_string_indices[start_index]
            action, option_string, explicit_arg = option_tuple

            # identify additional optionals in the same arg string
            # (e.g. -xyz is the same as -x -y -z if no args are required)
            match_argument = self._match_argument
            action_tuples = []
            while True:

                # if we found no optional action, skip it
                if action is None:
                    extras.append(arg_strings[start_index])
                    return start_index + 1

                # if there is an explicit argument, try to match the
                # optional's string arguments to only this
                if explicit_arg is not None:
                    arg_count = match_argument(action, 'A')

                    # if the action is a single-dash option and takes no
                    # arguments, try to parse more single-dash options out
                    # of the tail of the option string
                    chars = self.prefix_chars
                    if arg_count == 0 and option_string[1] not in chars:
                        action_tuples.append((action, [], option_string))
                        for char in self.prefix_chars:
                            option_string = char + explicit_arg[0]
                            explicit_arg = explicit_arg[1:] or None
                            optionals_map = self._option_string_actions
                            if option_string in optionals_map:
                                action = optionals_map[option_string]
                                break
                        else:
                            msg = _('ignored explicit argument %r')
                            raise ArgumentError(action, msg % explicit_arg)

                    # if the action expect exactly one argument, we've
                    # successfully matched the option; exit the loop
                    elif arg_count == 1:
                        stop = start_index + 1
                        args = [explicit_arg]
                        action_tuples.append((action, args, option_string))
                        break

                    # error if a double-dash option did not use the
                    # explicit argument
                    else:
                        msg = _('ignored explicit argument %r')
                        raise ArgumentError(action, msg % explicit_arg)

                # if there is no explicit argument, try to match the
                # optional's string arguments with the following strings
                # if successful, exit the loop
                else:
                    start = start_index + 1
                    selected_patterns = arg_strings_pattern[start:]
                    arg_count = match_argument(action, selected_patterns)
                    stop = start + arg_count
                    args = arg_strings[start:stop]
                    action_tuples.append((action, args, option_string))
                    break

            # add the Optional to the list and return the index at which
            # the Optional's string args stopped
            assert action_tuples
            for action, args, option_string in action_tuples:
                take_action(action, args, option_string)
            return stop

        # the list of Positionals left to be parsed; this is modified
        # by consume_positionals()
        positionals = self._get_positional_actions()

        # function to convert arg_strings into positional actions
        def consume_positionals(start_index):
            # match as many Positionals as possible
            match_partial = self._match_arguments_partial
            selected_pattern = arg_strings_pattern[start_index:]
            arg_counts = match_partial(positionals, selected_pattern)

            # slice off the appropriate arg strings for each Positional
            # and add the Positional and its args to the list
            for action, arg_count in zip(positionals, arg_counts):
                args = arg_strings[start_index: start_index + arg_count]
                start_index += arg_count
                take_action(action, args)

            # slice off the Positionals that we just parsed and return the
            # index at which the Positionals' string args stopped
            positionals[:] = positionals[len(arg_counts):]
            return start_index

        # consume Positionals and Optionals alternately, until we have
        # passed the last option string
        extras = []
        start_index = 0
        if option_string_indices:
            max_option_string_index = max(option_string_indices)
        else:
            max_option_string_index = -1
        while start_index <= max_option_string_index:

            # consume any Positionals preceding the next option
            next_option_string_index = min([
                index
                for index in option_string_indices
                if index >= start_index])
            if start_index != next_option_string_index:
                positionals_end_index = consume_positionals(start_index)

                # only try to parse the next optional if we didn't consume
                # the option string during the positionals parsing
                if positionals_end_index > start_index:
                    start_index = positionals_end_index
                    continue
                else:
                    start_index = positionals_end_index

            # if we consumed all the positionals we could and we're not
            # at the index of an option string, there were extra arguments
            if start_index not in option_string_indices:
                strings = arg_strings[start_index:next_option_string_index]
                extras.extend(strings)
                start_index = next_option_string_index

            # consume the next optional and any arguments for it
            start_index = consume_optional(start_index)

        # consume any positionals following the last Optional
        stop_index = consume_positionals(start_index)

        # if we didn't consume all the argument strings, there were extras
        extras.extend(arg_strings[stop_index:])

        # if we didn't use all the Positional objects, there were too few
        # arg strings supplied.
        if positionals:
            self.error(_('too few arguments'))

        # make sure all required actions were present
        for action in self._actions:
            if action.required:
                if action not in seen_actions:
                    name = _get_action_name(action)
                    self.error(_('argument %s is required') % name)

        # make sure all required groups had one option present
        for group in self._mutually_exclusive_groups:
            if group.required:
                for action in group._group_actions:
                    if action in seen_non_default_actions:
                        break

                # if no actions were used, report the error
                else:
                    names = [_get_action_name(action)
                             for action in group._group_actions
                             if action.help is not SUPPRESS]
                    msg = _('one of the arguments %s is required')
                    self.error(msg % ' '.join(names))

        # return the updated namespace and the extra arguments
        return namespace, extras
+
+ def _read_args_from_files(self, arg_strings):
+ # expand arguments referencing files
+ new_arg_strings = []
+ for arg_string in arg_strings:
+
+ # for regular arguments, just add them back into the list
+ if arg_string[0] not in self.fromfile_prefix_chars:
+ new_arg_strings.append(arg_string)
+
+ # replace arguments referencing files with the file content
+ else:
+ try:
+ args_file = open(arg_string[1:])
+ try:
+ arg_strings = args_file.read().splitlines()
+ arg_strings = self._read_args_from_files(arg_strings)
+ new_arg_strings.extend(arg_strings)
+ finally:
+ args_file.close()
+ except IOError:
+ err = _sys.exc_info()[1]
+ self.error(str(err))
+
+ # return the modified argument list
+ return new_arg_strings
+
+ def _match_argument(self, action, arg_strings_pattern):
+ # match the pattern for this action to the arg strings
+ nargs_pattern = self._get_nargs_pattern(action)
+ match = _re.match(nargs_pattern, arg_strings_pattern)
+
+ # raise an exception if we weren't able to find a match
+ if match is None:
+ nargs_errors = {
+ None: _('expected one argument'),
+ OPTIONAL: _('expected at most one argument'),
+ ONE_OR_MORE: _('expected at least one argument'),
+ }
+ default = _('expected %s argument(s)') % action.nargs
+ msg = nargs_errors.get(action.nargs, default)
+ raise ArgumentError(action, msg)
+
+ # return the number of arguments matched
+ return len(match.group(1))
+
def _match_arguments_partial(self, actions, arg_strings_pattern):
    """Match as many leading actions as possible against the pattern,
    returning the per-action argument-string counts (empty if none)."""
    # try the full action list first, then progressively drop actions
    # from the end until some prefix of the list matches
    for count in range(len(actions), 0, -1):
        combined = ''.join(self._get_nargs_pattern(action)
                           for action in actions[:count])
        match = _re.match(combined, arg_strings_pattern)
        if match is not None:
            return [len(group) for group in match.groups()]

    # nothing matched at all
    return []
+
def _parse_optional(self, arg_string):
    """Classify arg_string as an optional argument.

    Returns None when the string should be treated as a positional;
    otherwise a (action, option_string, explicit_arg) tuple, where the
    action is None for strings that look like options but are unknown
    to this parser.
    """
    # empty strings, strings without an option prefix character, and
    # bare runs of dashes are all positionals
    if not arg_string:
        return None
    if arg_string[0] not in self.prefix_chars:
        return None
    if not arg_string.strip('-'):
        return None

    # exact match against a registered option string
    if arg_string in self._option_string_actions:
        return self._option_string_actions[arg_string], arg_string, None

    # otherwise look for prefix (abbreviation) matches; more than one
    # candidate means the abbreviation is ambiguous
    option_tuples = self._get_option_tuples(arg_string)
    if len(option_tuples) > 1:
        options = ', '.join([option_string
            for action, option_string, explicit_arg in option_tuples])
        self.error(_('ambiguous option: %s could match %s')
                   % (arg_string, options))
    elif len(option_tuples) == 1:
        return option_tuples[0]

    # negative numbers are positionals, unless the parser declares
    # options that themselves look like negative numbers
    if (self._negative_number_matcher.match(arg_string) and
            not self._has_negative_number_optionals):
        return None

    # a string containing a space must have been a (quoted) positional
    if ' ' in arg_string:
        return None

    # looks like an optional, but is unknown to this parser (it might
    # still be valid for a subparser)
    return None, arg_string, None
+
def _get_option_tuples(self, option_string):
    """Return every (action, option_string, explicit_arg) tuple whose
    registered option string the given string could abbreviate."""
    chars = self.prefix_chars
    matches = []

    if option_string[0] in chars and option_string[1] in chars:
        # double-prefix (long) option: only an '=' separates an
        # explicit argument; abbreviations match the part before it
        if '=' in option_string:
            prefix, explicit_arg = option_string.split('=', 1)
        else:
            prefix, explicit_arg = option_string, None
        for candidate in self._option_string_actions:
            if candidate.startswith(prefix):
                action = self._option_string_actions[candidate]
                matches.append((action, candidate, explicit_arg))

    elif option_string[0] in chars and option_string[1] not in chars:
        # single-prefix (short) option: '-xvalue' may be option '-x'
        # with argument 'value', or an abbreviation of a longer
        # single-dash option with a separate argument
        prefix = option_string
        short_prefix = option_string[:2]
        short_arg = option_string[2:]
        for candidate in self._option_string_actions:
            action = self._option_string_actions[candidate]
            if candidate == short_prefix:
                matches.append((action, candidate, short_arg))
            elif candidate.startswith(prefix):
                matches.append((action, candidate, None))

    # shouldn't ever get here
    else:
        self.error(_('unexpected option string: %s') % option_string)

    # return the collected option tuples
    return matches
+
def _get_nargs_pattern(self, action):
    """Build the regex fragment describing how many 'A' (argument) and
    '-' ('--' separator) characters this action may consume."""
    nargs = action.nargs

    if nargs is None:
        # the default is a single argument
        pattern = '(-*A-*)'
    elif nargs == OPTIONAL:
        # zero or one arguments
        pattern = '(-*A?-*)'
    elif nargs == ZERO_OR_MORE:
        pattern = '(-*[A-]*)'
    elif nargs == ONE_OR_MORE:
        pattern = '(-*A[A-]*)'
    elif nargs is PARSER:
        # one argument followed by anything, options ('O') included
        pattern = '(-*A[-AO]*)'
    else:
        # all other values must be integer counts
        pattern = '(-*%s-*)' % '-*'.join('A' * nargs)

    # optional actions never consume a bare '--' separator
    if action.option_strings:
        pattern = pattern.replace('-*', '').replace('-', '')
    return pattern
+
+ # ========================
+ # Value conversion methods
+ # ========================
def _get_values(self, action, arg_strings):
    """Convert the matched argument strings into the value (or list of
    values) to store for the action, applying type conversion and
    choice checking along the way."""
    # '--' separators are consumed silently except for PARSER actions
    if action.nargs is not PARSER:
        arg_strings = [string for string in arg_strings if string != '--']

    if not arg_strings and action.nargs == OPTIONAL:
        # absent optional '?': const for optionals, default for
        # positionals; string defaults still go through type conversion
        if action.option_strings:
            value = action.const
        else:
            value = action.default
        if isinstance(value, _basestring):
            value = self._get_value(action, value)
        self._check_value(action, value)

    elif (not arg_strings and action.nargs == ZERO_OR_MORE and
          not action.option_strings):
        # empty '*' positional: fall back to a non-None default
        if action.default is not None:
            value = action.default
        else:
            value = arg_strings
        self._check_value(action, value)

    elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
        # exactly one string for a single-valued action
        value = self._get_value(action, arg_strings[0])
        self._check_value(action, value)

    elif action.nargs is PARSER:
        # subparser: convert everything but validate only the command
        # name (the first element)
        value = [self._get_value(action, string) for string in arg_strings]
        self._check_value(action, value[0])

    else:
        # every other nargs produces a list of converted values
        value = [self._get_value(action, string) for string in arg_strings]
        for converted in value:
            self._check_value(action, converted)

    # return the converted value
    return value
+
def _get_value(self, action, arg_string):
    """Apply the action's type conversion to a raw argument string,
    turning conversion failures into ArgumentErrors."""
    type_func = self._registry_get('type', action.type, action.type)

    # reject objects that are neither callable nor (classic) classes
    if not hasattr(type_func, '__call__'):
        if not hasattr(type_func, '__bases__'):
            raise ArgumentError(action, _('%r is not callable') % type_func)

    try:
        return type_func(arg_string)
    except (TypeError, ValueError):
        # conversion failures become argument errors naming the type
        name = getattr(action.type, '__name__', repr(action.type))
        raise ArgumentError(action,
                            _('invalid %s value: %r') % (name, arg_string))
+
def _check_value(self, action, value):
    """Raise ArgumentError unless the value is among the action's
    declared choices (no-op when choices is None)."""
    choices = action.choices
    if choices is None or value in choices:
        return
    raise ArgumentError(action,
                        _('invalid choice: %r (choose from %s)')
                        % (value, ', '.join([repr(c) for c in choices])))
+
+ # =======================
+ # Help-formatting methods
+ # =======================
def format_usage(self):
    """Return just the usage line for this parser (no description,
    option tables, or epilog)."""
    usage_formatter = self._get_formatter()
    usage_formatter.add_usage(
        self.usage, self._actions, self._mutually_exclusive_groups)
    return usage_formatter.format_help()
+
def format_help(self):
    """Return the full help text: usage, description, each action
    group as a titled section, then the epilog."""
    formatter = self._get_formatter()

    formatter.add_usage(self.usage, self._actions,
                        self._mutually_exclusive_groups)
    formatter.add_text(self.description)

    # positionals, optionals and user-defined groups each render as
    # their own titled section
    for group in self._action_groups:
        formatter.start_section(group.title)
        formatter.add_text(group.description)
        formatter.add_arguments(group._group_actions)
        formatter.end_section()

    formatter.add_text(self.epilog)

    # determine help from format above
    return formatter.format_help()
+
def format_version(self):
    """Return the formatted version message for this parser."""
    version_formatter = self._get_formatter()
    version_formatter.add_text(self.version)
    return version_formatter.format_help()
+
def _get_formatter(self):
    """Instantiate this parser's help formatter class for self.prog."""
    factory = self.formatter_class
    return factory(prog=self.prog)
+
+ # =====================
+ # Help-printing methods
+ # =====================
def print_usage(self, file=None):
    """Write the usage message to *file* (stderr when None)."""
    message = self.format_usage()
    self._print_message(message, file)
+
def print_help(self, file=None):
    """Write the full help text to *file* (stderr when None)."""
    message = self.format_help()
    self._print_message(message, file)
+
def print_version(self, file=None):
    """Write the version message to *file* (stderr when None)."""
    message = self.format_version()
    self._print_message(message, file)
+
def _print_message(self, message, file=None):
    """Write *message* to *file*, defaulting to stderr; empty or None
    messages are silently skipped."""
    if not message:
        return
    if file is None:
        file = _sys.stderr
    file.write(message)
+
+ # ===============
+ # Exiting methods
+ # ===============
def exit(self, status=0, message=None):
    """Terminate the process via _sys.exit(status), first writing
    *message* (if any) to _sys.stderr."""
    if message:
        _sys.stderr.write(message)
    _sys.exit(status)
+
def error(self, message):
    """Report a usage error and terminate.

    Prints the usage message plus the given error text to stderr and
    exits with status 2.  Subclass overrides must likewise not return:
    they should exit or raise.
    """
    self.print_usage(_sys.stderr)
    formatted = _('%s: error: %s\n') % (self.prog, message)
    self.exit(2, formatted)
diff --git a/micng/utils/cmdln.py b/micng/utils/cmdln.py new file mode 100644 index 0000000..aa37fa9 --- /dev/null +++ b/micng/utils/cmdln.py @@ -0,0 +1,1539 @@ +# Copyright (c) 2002-2005 ActiveState Corp. +# License: MIT (see LICENSE.txt for license details) +# Author: Trent Mick (TrentM@ActiveState.com) +# Home: http://trentm.com/projects/cmdln/ + +"""An improvement on Python's standard cmd.py module. + +As with cmd.py, this module provides "a simple framework for writing +line-oriented command intepreters." This module provides a 'RawCmdln' +class that fixes some design flaws in cmd.Cmd, making it more scalable +and nicer to use for good 'cvs'- or 'svn'-style command line interfaces +or simple shells. And it provides a 'Cmdln' class that add +optparse-based option processing. Basically you use it like this: + + import cmdln + + class MySVN(cmdln.Cmdln): + name = "svn" + + @cmdln.alias('stat', 'st') + @cmdln.option('-v', '--verbose', action='store_true' + help='print verbose information') + def do_status(self, subcmd, opts, *paths): + print "handle 'svn status' command" + + #... + + if __name__ == "__main__": + shell = MySVN() + retval = shell.main() + sys.exit(retval) + +See the README.txt or <http://trentm.com/projects/cmdln/> for more +details. +""" + +__revision__ = "$Id: cmdln.py 1666 2007-05-09 03:13:03Z trentm $" +__version_info__ = (1, 0, 0) +__version__ = '.'.join(map(str, __version_info__)) + +import os +import re +import cmd +import optparse +from pprint import pprint +from datetime import date + + + + +#---- globals + +LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3) + +# An unspecified optional argument when None is a meaningful value. +_NOT_SPECIFIED = ("Not", "Specified") + +# Pattern to match a TypeError message from a call that +# failed because of incorrect number of arguments (see +# Python/getargs.c). +_INCORRECT_NUM_ARGS_RE = re.compile( + r"(takes [\w ]+ )(\d+)( arguments? 
\()(\d+)( given\))") + +# Static bits of man page +MAN_HEADER = r""".TH %(ucname)s "1" "%(date)s" "%(name)s %(version)s" "User Commands" +.SH NAME +%(name)s \- Program to do useful things. +.SH SYNOPSIS +.B %(name)s +[\fIGLOBALOPTS\fR] \fISUBCOMMAND \fR[\fIOPTS\fR] [\fIARGS\fR...] +.br +.B %(name)s +\fIhelp SUBCOMMAND\fR +.SH DESCRIPTION +""" +MAN_COMMANDS_HEADER = r""" +.SS COMMANDS +""" +MAN_OPTIONS_HEADER = r""" +.SS GLOBAL OPTIONS +""" +MAN_FOOTER = r""" +.SH AUTHOR +This man page is automatically generated. +""" + +#---- exceptions + +class CmdlnError(Exception): + """A cmdln.py usage error.""" + def __init__(self, msg): + self.msg = msg + def __str__(self): + return self.msg + +class CmdlnUserError(Exception): + """An error by a user of a cmdln-based tool/shell.""" + pass + + + +#---- public methods and classes + +def alias(*aliases): + """Decorator to add aliases for Cmdln.do_* command handlers. + + Example: + class MyShell(cmdln.Cmdln): + @cmdln.alias("!", "sh") + def do_shell(self, argv): + #...implement 'shell' command + """ + def decorate(f): + if not hasattr(f, "aliases"): + f.aliases = [] + f.aliases += aliases + return f + return decorate + +MAN_REPLACES = [ + (re.compile(r'(^|[ \t\[\'])--([^/ \t/,-]*)-([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\-\2\-\3\-\4'), + (re.compile(r'(^|[ \t\[\'])-([^/ \t/,-]*)-([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\2\-\3\-\4'), + (re.compile(r'(^|[ \t\[\'])--([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\-\2\-\3'), + (re.compile(r'(^|[ \t\[\'])-([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\2\-\3'), + (re.compile(r'(^|[ \t\[\'])--([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\-\2'), + (re.compile(r'(^|[ \t\[\'])-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\2'), + (re.compile(r"^'"), r" '"), + ] + +def man_escape(text): + ''' + Escapes text to be included in man page. + + For now it only escapes dashes in command line options. 
+ ''' + for repl in MAN_REPLACES: + text = repl[0].sub(repl[1], text) + return text + +class RawCmdln(cmd.Cmd): + """An improved (on cmd.Cmd) framework for building multi-subcommand + scripts (think "svn" & "cvs") and simple shells (think "pdb" and + "gdb"). + + A simple example: + + import cmdln + + class MySVN(cmdln.RawCmdln): + name = "svn" + + @cmdln.aliases('stat', 'st') + def do_status(self, argv): + print "handle 'svn status' command" + + if __name__ == "__main__": + shell = MySVN() + retval = shell.main() + sys.exit(retval) + + See <http://trentm.com/projects/cmdln> for more information. + """ + name = None # if unset, defaults basename(sys.argv[0]) + prompt = None # if unset, defaults to self.name+"> " + version = None # if set, default top-level options include --version + + # Default messages for some 'help' command error cases. + # They are interpolated with one arg: the command. + nohelp = "no help on '%s'" + unknowncmd = "unknown command: '%s'" + + helpindent = '' # string with which to indent help output + + # Default man page parts, please change them in subclass + man_header = MAN_HEADER + man_commands_header = MAN_COMMANDS_HEADER + man_options_header = MAN_OPTIONS_HEADER + man_footer = MAN_FOOTER + + def __init__(self, completekey='tab', + stdin=None, stdout=None, stderr=None): + """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None) + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. + + The optional arguments 'stdin', 'stdout' and 'stderr' specify + alternate input, output and error output file objects; if not + specified, sys.* are used. + + If 'stdout' but not 'stderr' is specified, stdout is used for + error output. This is to provide least surprise for users used + to only the 'stdin' and 'stdout' options with cmd.Cmd. 
+ """ + import sys + if self.name is None: + self.name = os.path.basename(sys.argv[0]) + if self.prompt is None: + self.prompt = self.name+"> " + self._name_str = self._str(self.name) + self._prompt_str = self._str(self.prompt) + if stdin is not None: + self.stdin = stdin + else: + self.stdin = sys.stdin + if stdout is not None: + self.stdout = stdout + else: + self.stdout = sys.stdout + if stderr is not None: + self.stderr = stderr + elif stdout is not None: + self.stderr = stdout + else: + self.stderr = sys.stderr + self.cmdqueue = [] + self.completekey = completekey + self.cmdlooping = False + + def get_optparser(self): + """Hook for subclasses to set the option parser for the + top-level command/shell. + + This option parser is used retrieved and used by `.main()' to + handle top-level options. + + The default implements a single '-h|--help' option. Sub-classes + can return None to have no options at the top-level. Typically + an instance of CmdlnOptionParser should be returned. + """ + version = (self.version is not None + and "%s %s" % (self._name_str, self.version) + or None) + return CmdlnOptionParser(self, version=version) + + def get_version(self): + """ + Returns version of program. To be replaced in subclass. + """ + return __version__ + + def postoptparse(self): + """Hook method executed just after `.main()' parses top-level + options. + + When called `self.values' holds the results of the option parse. + """ + pass + + def main(self, argv=None, loop=LOOP_NEVER): + """A possible mainline handler for a script, like so: + + import cmdln + class MyCmd(cmdln.Cmdln): + name = "mycmd" + ... + + if __name__ == "__main__": + MyCmd().main() + + By default this will use sys.argv to issue a single command to + 'MyCmd', then exit. The 'loop' argument can be use to control + interactive shell behaviour. + + Arguments: + "argv" (optional, default sys.argv) is the command to run. 
+ It must be a sequence, where the first element is the + command name and subsequent elements the args for that + command. + "loop" (optional, default LOOP_NEVER) is a constant + indicating if a command loop should be started (i.e. an + interactive shell). Valid values (constants on this module): + LOOP_ALWAYS start loop and run "argv", if any + LOOP_NEVER run "argv" (or .emptyline()) and exit + LOOP_IF_EMPTY run "argv", if given, and exit; + otherwise, start loop + """ + if argv is None: + import sys + argv = sys.argv + else: + argv = argv[:] # don't modify caller's list + + self.optparser = self.get_optparser() + if self.optparser: # i.e. optparser=None means don't process for opts + try: + self.options, args = self.optparser.parse_args(argv[1:]) + except CmdlnUserError, ex: + msg = "%s: %s\nTry '%s help' for info.\n"\ + % (self.name, ex, self.name) + self.stderr.write(self._str(msg)) + self.stderr.flush() + return 1 + except StopOptionProcessing, ex: + return 0 + else: + self.options, args = None, argv[1:] + self.postoptparse() + + if loop == LOOP_ALWAYS: + if args: + self.cmdqueue.append(args) + return self.cmdloop() + elif loop == LOOP_NEVER: + if args: + return self.cmd(args) + else: + return self.emptyline() + elif loop == LOOP_IF_EMPTY: + if args: + return self.cmd(args) + else: + return self.cmdloop() + + def cmd(self, argv): + """Run one command and exit. + + "argv" is the arglist for the command to run. argv[0] is the + command to run. If argv is an empty list then the + 'emptyline' handler is run. + + Returns the return value from the command handler. 
+ """ + assert isinstance(argv, (list, tuple)), \ + "'argv' is not a sequence: %r" % argv + retval = None + try: + argv = self.precmd(argv) + retval = self.onecmd(argv) + self.postcmd(argv) + except: + if not self.cmdexc(argv): + raise + retval = 1 + return retval + + def _str(self, s): + """Safely convert the given str/unicode to a string for printing.""" + try: + return str(s) + except UnicodeError: + #XXX What is the proper encoding to use here? 'utf-8' seems + # to work better than "getdefaultencoding" (usually + # 'ascii'), on OS X at least. + #import sys + #return s.encode(sys.getdefaultencoding(), "replace") + return s.encode("utf-8", "replace") + + def cmdloop(self, intro=None): + """Repeatedly issue a prompt, accept input, parse into an argv, and + dispatch (via .precmd(), .onecmd() and .postcmd()), passing them + the argv. In other words, start a shell. + + "intro" (optional) is a introductory message to print when + starting the command loop. This overrides the class + "intro" attribute, if any. 
+ """ + self.cmdlooping = True + self.preloop() + if intro is None: + intro = self.intro + if intro: + intro_str = self._str(intro) + self.stdout.write(intro_str+'\n') + self.stop = False + retval = None + while not self.stop: + if self.cmdqueue: + argv = self.cmdqueue.pop(0) + assert isinstance(argv, (list, tuple)), \ + "item on 'cmdqueue' is not a sequence: %r" % argv + else: + if self.use_rawinput: + try: + line = raw_input(self._prompt_str) + except EOFError: + line = 'EOF' + else: + self.stdout.write(self._prompt_str) + self.stdout.flush() + line = self.stdin.readline() + if not len(line): + line = 'EOF' + else: + line = line[:-1] # chop '\n' + argv = line2argv(line) + try: + argv = self.precmd(argv) + retval = self.onecmd(argv) + self.postcmd(argv) + except: + if not self.cmdexc(argv): + raise + retval = 1 + self.lastretval = retval + self.postloop() + self.cmdlooping = False + return retval + + def precmd(self, argv): + """Hook method executed just before the command argv is + interpreted, but after the input prompt is generated and issued. + + "argv" is the cmd to run. + + Returns an argv to run (i.e. this method can modify the command + to run). + """ + return argv + + def postcmd(self, argv): + """Hook method executed just after a command dispatch is finished. + + "argv" is the command that was run. + """ + pass + + def cmdexc(self, argv): + """Called if an exception is raised in any of precmd(), onecmd(), + or postcmd(). If True is returned, the exception is deemed to have + been dealt with. Otherwise, the exception is re-raised. + + The default implementation handles CmdlnUserError's, which + typically correspond to user error in calling commands (as + opposed to programmer error in the design of the script using + cmdln.py). 
+ """ + import sys + type, exc, traceback = sys.exc_info() + if isinstance(exc, CmdlnUserError): + msg = "%s %s: %s\nTry '%s help %s' for info.\n"\ + % (self.name, argv[0], exc, self.name, argv[0]) + self.stderr.write(self._str(msg)) + self.stderr.flush() + return True + + def onecmd(self, argv): + if not argv: + return self.emptyline() + self.lastcmd = argv + cmdname = self._get_canonical_cmd_name(argv[0]) + if cmdname: + handler = self._get_cmd_handler(cmdname) + if handler: + return self._dispatch_cmd(handler, argv) + return self.default(argv) + + def _dispatch_cmd(self, handler, argv): + return handler(argv) + + def default(self, argv): + """Hook called to handle a command for which there is no handler. + + "argv" is the command and arguments to run. + + The default implementation writes and error message to stderr + and returns an error exit status. + + Returns a numeric command exit status. + """ + errmsg = self._str(self.unknowncmd % (argv[0],)) + if self.cmdlooping: + self.stderr.write(errmsg+"\n") + else: + self.stderr.write("%s: %s\nTry '%s help' for info.\n" + % (self._name_str, errmsg, self._name_str)) + self.stderr.flush() + return 1 + + def parseline(self, line): + # This is used by Cmd.complete (readline completer function) to + # massage the current line buffer before completion processing. + # We override to drop special '!' handling. + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + + def helpdefault(self, cmd, known): + """Hook called to handle help on a command for which there is no + help handler. + + "cmd" is the command name on which help was requested. + "known" is a boolean indicating if this command is known + (i.e. if there is a handler for it). + + Returns a return code. 
+ """ + if known: + msg = self._str(self.nohelp % (cmd,)) + if self.cmdlooping: + self.stderr.write(msg + '\n') + else: + self.stderr.write("%s: %s\n" % (self.name, msg)) + else: + msg = self.unknowncmd % (cmd,) + if self.cmdlooping: + self.stderr.write(msg + '\n') + else: + self.stderr.write("%s: %s\n" + "Try '%s help' for info.\n" + % (self.name, msg, self.name)) + self.stderr.flush() + return 1 + + + def do_help(self, argv): + """${cmd_name}: give detailed help on a specific sub-command + + usage: + ${name} help [SUBCOMMAND] + """ + if len(argv) > 1: # asking for help on a particular command + doc = None + cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1] + if not cmdname: + return self.helpdefault(argv[1], False) + else: + helpfunc = getattr(self, "help_"+cmdname, None) + if helpfunc: + doc = helpfunc() + else: + handler = self._get_cmd_handler(cmdname) + if handler: + doc = handler.__doc__ + if doc is None: + return self.helpdefault(argv[1], handler != None) + else: # bare "help" command + doc = self.__class__.__doc__ # try class docstring + if doc is None: + # Try to provide some reasonable useful default help. + if self.cmdlooping: prefix = "" + else: prefix = self.name+' ' + doc = """usage: + %sSUBCOMMAND [ARGS...] 
+ %shelp [SUBCOMMAND] + + ${option_list} + ${command_list} + ${help_list} + """ % (prefix, prefix) + cmdname = None + + if doc: # *do* have help content, massage and print that + doc = self._help_reindent(doc) + doc = self._help_preprocess(doc, cmdname) + doc = doc.rstrip() + '\n' # trim down trailing space + self.stdout.write(self._str(doc)) + self.stdout.flush() + do_help.aliases = ["?"] + + + def do_man(self, argv): + """${cmd_name}: generates a man page + + usage: + ${name} man + """ + self.stdout.write(self.man_header % { + 'date': date.today().strftime('%b %Y'), + 'version': self.get_version(), + 'name': self.name, + 'ucname': self.name.upper() + } + ) + + self.stdout.write(self.man_commands_header) + commands = self._help_get_command_list() + for command, doc in commands: + cmdname = command.split(' ')[0] + text = self._help_preprocess(doc, cmdname) + lines = [] + for line in text.splitlines(False): + if line[:8] == ' ' * 8: + line = line[8:] + lines.append(man_escape(line)) + + self.stdout.write('.TP\n\\fB%s\\fR\n%s\n' % (command, '\n'.join(lines))) + + self.stdout.write(self.man_options_header) + self.stdout.write(man_escape(self._help_preprocess('${option_list}', None))) + + self.stdout.write(self.man_footer) + + self.stdout.flush() + + def _help_reindent(self, help, indent=None): + """Hook to re-indent help strings before writing to stdout. + + "help" is the help content to re-indent + "indent" is a string with which to indent each line of the + help content after normalizing. If unspecified or None + then the default is use: the 'self.helpindent' class + attribute. By default this is the empty string, i.e. + no indentation. + + By default, all common leading whitespace is removed and then + the lot is indented by 'self.helpindent'. 
When calculating the + common leading whitespace the first line is ignored -- hence + help content for Conan can be written as follows and have the + expected indentation: + + def do_crush(self, ...): + '''${cmd_name}: crush your enemies, see them driven before you... + + c.f. Conan the Barbarian''' + """ + if indent is None: + indent = self.helpindent + lines = help.splitlines(0) + _dedentlines(lines, skip_first_line=True) + lines = [(indent+line).rstrip() for line in lines] + return '\n'.join(lines) + + def _help_preprocess(self, help, cmdname): + """Hook to preprocess a help string before writing to stdout. + + "help" is the help string to process. + "cmdname" is the canonical sub-command name for which help + is being given, or None if the help is not specific to a + command. + + By default the following template variables are interpolated in + help content. (Note: these are similar to Python 2.4's + string.Template interpolation but not quite.) + + ${name} + The tool's/shell's name, i.e. 'self.name'. + ${option_list} + A formatted table of options for this shell/tool. + ${command_list} + A formatted table of available sub-commands. + ${help_list} + A formatted table of additional help topics (i.e. 'help_*' + methods with no matching 'do_*' method). + ${cmd_name} + The name (and aliases) for this sub-command formatted as: + "NAME (ALIAS1, ALIAS2, ...)". + ${cmd_usage} + A formatted usage block inferred from the command function + signature. + ${cmd_option_list} + A formatted table of options for this sub-command. (This is + only available for commands using the optparse integration, + i.e. using @cmdln.option decorators or manually setting the + 'optparser' attribute on the 'do_*' method.) + + Returns the processed help. 
+ """ + preprocessors = { + "${name}": self._help_preprocess_name, + "${option_list}": self._help_preprocess_option_list, + "${command_list}": self._help_preprocess_command_list, + "${help_list}": self._help_preprocess_help_list, + "${cmd_name}": self._help_preprocess_cmd_name, + "${cmd_usage}": self._help_preprocess_cmd_usage, + "${cmd_option_list}": self._help_preprocess_cmd_option_list, + } + + for marker, preprocessor in preprocessors.items(): + if marker in help: + help = preprocessor(help, cmdname) + return help + + def _help_preprocess_name(self, help, cmdname=None): + return help.replace("${name}", self.name) + + def _help_preprocess_option_list(self, help, cmdname=None): + marker = "${option_list}" + indent, indent_width = _get_indent(marker, help) + suffix = _get_trailing_whitespace(marker, help) + + if self.optparser: + # Setup formatting options and format. + # - Indentation of 4 is better than optparse default of 2. + # C.f. Damian Conway's discussion of this in Perl Best + # Practices. + self.optparser.formatter.indent_increment = 4 + self.optparser.formatter.current_indent = indent_width + block = self.optparser.format_option_help() + '\n' + else: + block = "" + + help = help.replace(indent+marker+suffix, block, 1) + return help + + def _help_get_command_list(self): + # Find any aliases for commands. + token2canonical = self._get_canonical_map() + aliases = {} + for token, cmdname in token2canonical.items(): + if token == cmdname: continue + aliases.setdefault(cmdname, []).append(token) + + # Get the list of (non-hidden) commands and their + # documentation, if any. 
+ cmdnames = {} # use a dict to strip duplicates + for attr in self.get_names(): + if attr.startswith("do_"): + cmdnames[attr[3:]] = True + cmdnames = cmdnames.keys() + cmdnames.remove("help") + cmdnames.remove("man") + #cmdnames.sort() + linedata = [] + for cmdname in cmdnames: + if aliases.get(cmdname): + a = aliases[cmdname] + a.sort() + cmdstr = "%s (%s)" % (cmdname, ", ".join(a)) + else: + cmdstr = cmdname + doc = None + try: + helpfunc = getattr(self, 'help_'+cmdname) + except AttributeError: + handler = self._get_cmd_handler(cmdname) + if handler: + doc = handler.__doc__ + else: + doc = helpfunc() + + # Strip "${cmd_name}: " from the start of a command's doc. Best + # practice dictates that command help strings begin with this, but + # it isn't at all wanted for the command list. + to_strip = "${cmd_name}:" + if doc and doc.startswith(to_strip): + #log.debug("stripping %r from start of %s's help string", + # to_strip, cmdname) + doc = doc[len(to_strip):].lstrip() + if not getattr(self._get_cmd_handler(cmdname), "hidden", None): + linedata.append( (cmdstr, doc) ) + + return linedata + + def _help_preprocess_command_list(self, help, cmdname=None): + marker = "${command_list}" + indent, indent_width = _get_indent(marker, help) + suffix = _get_trailing_whitespace(marker, help) + + linedata = self._help_get_command_list() + + if linedata: + subindent = indent + ' '*4 + lines = _format_linedata(linedata, subindent, indent_width+4) + block = indent + "commands:\n" \ + + '\n'.join(lines) + "\n\n" + help = help.replace(indent+marker+suffix, block, 1) + return help + + def _help_preprocess_help_list(self, help, cmdname=None): + marker = "${help_list}" + indent, indent_width = _get_indent(marker, help) + suffix = _get_trailing_whitespace(marker, help) + + # Determine the additional help topics, if any. 
+ helpnames = {} + token2cmdname = self._get_canonical_map() + for attr in self.get_names(): + if not attr.startswith("help_"): continue + helpname = attr[5:] + if helpname not in token2cmdname: + helpnames[helpname] = True + + if helpnames: + helpnames = helpnames.keys() + helpnames.sort() + linedata = [(self.name+" help "+n, "") for n in helpnames] + + subindent = indent + ' '*4 + lines = _format_linedata(linedata, subindent, indent_width+4) + block = indent + "additional help topics:\n" \ + + '\n'.join(lines) + "\n\n" + else: + block = '' + help = help.replace(indent+marker+suffix, block, 1) + return help + + def _help_preprocess_cmd_name(self, help, cmdname=None): + marker = "${cmd_name}" + handler = self._get_cmd_handler(cmdname) + if not handler: + raise CmdlnError("cannot preprocess '%s' into help string: " + "could not find command handler for %r" + % (marker, cmdname)) + s = cmdname + if hasattr(handler, "aliases"): + s += " (%s)" % (", ".join(handler.aliases)) + help = help.replace(marker, s) + return help + + #TODO: this only makes sense as part of the Cmdln class. + # Add hooks to add help preprocessing template vars and put + # this one on that class. + def _help_preprocess_cmd_usage(self, help, cmdname=None): + marker = "${cmd_usage}" + handler = self._get_cmd_handler(cmdname) + if not handler: + raise CmdlnError("cannot preprocess '%s' into help string: " + "could not find command handler for %r" + % (marker, cmdname)) + indent, indent_width = _get_indent(marker, help) + suffix = _get_trailing_whitespace(marker, help) + + # Extract the introspection bits we need. + func = handler.im_func + if func.func_defaults: + func_defaults = list(func.func_defaults) + else: + func_defaults = [] + co_argcount = func.func_code.co_argcount + co_varnames = func.func_code.co_varnames + co_flags = func.func_code.co_flags + CO_FLAGS_ARGS = 4 + CO_FLAGS_KWARGS = 8 + + # Adjust argcount for possible *args and **kwargs arguments. 
+ argcount = co_argcount + if co_flags & CO_FLAGS_ARGS: argcount += 1 + if co_flags & CO_FLAGS_KWARGS: argcount += 1 + + # Determine the usage string. + usage = "%s %s" % (self.name, cmdname) + if argcount <= 2: # handler ::= do_FOO(self, argv) + usage += " [ARGS...]" + elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...) + argnames = list(co_varnames[3:argcount]) + tail = "" + if co_flags & CO_FLAGS_KWARGS: + name = argnames.pop(-1) + import warnings + # There is no generally accepted mechanism for passing + # keyword arguments from the command line. Could + # *perhaps* consider: arg=value arg2=value2 ... + warnings.warn("argument '**%s' on '%s.%s' command " + "handler will never get values" + % (name, self.__class__.__name__, + func.func_name)) + if co_flags & CO_FLAGS_ARGS: + name = argnames.pop(-1) + tail = "[%s...]" % name.upper() + while func_defaults: + func_defaults.pop(-1) + name = argnames.pop(-1) + tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail) + while argnames: + name = argnames.pop(-1) + tail = "%s %s" % (name.upper(), tail) + usage += ' ' + tail + + block_lines = [ + self.helpindent + "usage:", + self.helpindent + ' '*4 + usage + ] + block = '\n'.join(block_lines) + '\n\n' + + help = help.replace(indent+marker+suffix, block, 1) + return help + + #TODO: this only makes sense as part of the Cmdln class. + # Add hooks to add help preprocessing template vars and put + # this one on that class. + def _help_preprocess_cmd_option_list(self, help, cmdname=None): + marker = "${cmd_option_list}" + handler = self._get_cmd_handler(cmdname) + if not handler: + raise CmdlnError("cannot preprocess '%s' into help string: " + "could not find command handler for %r" + % (marker, cmdname)) + indent, indent_width = _get_indent(marker, help) + suffix = _get_trailing_whitespace(marker, help) + if hasattr(handler, "optparser"): + # Setup formatting options and format. + # - Indentation of 4 is better than optparse default of 2. + # C.f. 
Damian Conway's discussion of this in Perl Best + # Practices. + handler.optparser.formatter.indent_increment = 4 + handler.optparser.formatter.current_indent = indent_width + block = handler.optparser.format_option_help() + '\n' + else: + block = "" + + help = help.replace(indent+marker+suffix, block, 1) + return help + + def _get_canonical_cmd_name(self, token): + map = self._get_canonical_map() + return map.get(token, None) + + def _get_canonical_map(self): + """Return a mapping of available command names and aliases to + their canonical command name. + """ + cacheattr = "_token2canonical" + if not hasattr(self, cacheattr): + # Get the list of commands and their aliases, if any. + token2canonical = {} + cmd2funcname = {} # use a dict to strip duplicates + for attr in self.get_names(): + if attr.startswith("do_"): cmdname = attr[3:] + elif attr.startswith("_do_"): cmdname = attr[4:] + else: + continue + cmd2funcname[cmdname] = attr + token2canonical[cmdname] = cmdname + for cmdname, funcname in cmd2funcname.items(): # add aliases + func = getattr(self, funcname) + aliases = getattr(func, "aliases", []) + for alias in aliases: + if alias in cmd2funcname: + import warnings + warnings.warn("'%s' alias for '%s' command conflicts " + "with '%s' handler" + % (alias, cmdname, cmd2funcname[alias])) + continue + token2canonical[alias] = cmdname + setattr(self, cacheattr, token2canonical) + return getattr(self, cacheattr) + + def _get_cmd_handler(self, cmdname): + handler = None + try: + handler = getattr(self, 'do_' + cmdname) + except AttributeError: + try: + # Private command handlers begin with "_do_". + handler = getattr(self, '_do_' + cmdname) + except AttributeError: + pass + return handler + + def _do_EOF(self, argv): + # Default EOF handler + # Note: an actual EOF is redirected to this command. + #TODO: separate name for this. Currently it is available from + # command-line. Is that okay? 
+ self.stdout.write('\n') + self.stdout.flush() + self.stop = True + + def emptyline(self): + # Different from cmd.Cmd: don't repeat the last command for an + # emptyline. + if self.cmdlooping: + pass + else: + return self.do_help(["help"]) + + +#---- optparse.py extension to fix (IMO) some deficiencies +# +# See the class _OptionParserEx docstring for details. +# + +class StopOptionProcessing(Exception): + """Indicate that option *and argument* processing should stop + cleanly. This is not an error condition. It is similar in spirit to + StopIteration. This is raised by _OptionParserEx's default "help" + and "version" option actions and can be raised by custom option + callbacks too. + + Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx) + usage is: + + parser = CmdlnOptionParser(mycmd) + parser.add_option("-f", "--force", dest="force") + ... + try: + opts, args = parser.parse_args() + except StopOptionProcessing: + # normal termination, "--help" was probably given + sys.exit(0) + """ + +class _OptionParserEx(optparse.OptionParser): + """An optparse.OptionParser that uses exceptions instead of sys.exit. + + This class is an extension of optparse.OptionParser that differs + as follows: + - Correct (IMO) the default OptionParser error handling to never + sys.exit(). Instead OptParseError exceptions are passed through. + - Add the StopOptionProcessing exception (a la StopIteration) to + indicate normal termination of option processing. + See StopOptionProcessing's docstring for details. + + I'd also like to see the following in the core optparse.py, perhaps + as a RawOptionParser which would serve as a base class for the more + generally used OptionParser (that works as current): + - Remove the implicit addition of the -h|--help and --version + options. They can get in the way (e.g. if want '-?' 
and '-V' for + these as well) and it is not hard to do: + optparser.add_option("-h", "--help", action="help") + optparser.add_option("--version", action="version") + These are good practices, just not valid defaults if they can + get in the way. + """ + def error(self, msg): + raise optparse.OptParseError(msg) + + def exit(self, status=0, msg=None): + if status == 0: + raise StopOptionProcessing(msg) + else: + #TODO: don't lose status info here + raise optparse.OptParseError(msg) + + + +#---- optparse.py-based option processing support + +class CmdlnOptionParser(_OptionParserEx): + """An optparse.OptionParser class more appropriate for top-level + Cmdln options. For parsing of sub-command options, see + SubCmdOptionParser. + + Changes: + - disable_interspersed_args() by default, because a Cmdln instance + has sub-commands which may themselves have options. + - Redirect print_help() to the Cmdln.do_help() which is better + equiped to handle the "help" action. + - error() will raise a CmdlnUserError: OptionParse.error() is meant + to be called for user errors. Raising a well-known error here can + make error handling clearer. + - Also see the changes in _OptionParserEx. + """ + def __init__(self, cmdln, **kwargs): + self.cmdln = cmdln + kwargs["prog"] = self.cmdln.name + _OptionParserEx.__init__(self, **kwargs) + self.disable_interspersed_args() + + def print_help(self, file=None): + self.cmdln.onecmd(["help"]) + + def error(self, msg): + raise CmdlnUserError(msg) + + +class SubCmdOptionParser(_OptionParserEx): + def set_cmdln_info(self, cmdln, subcmd): + """Called by Cmdln to pass relevant info about itself needed + for print_help(). + """ + self.cmdln = cmdln + self.subcmd = subcmd + + def print_help(self, file=None): + self.cmdln.onecmd(["help", self.subcmd]) + + def error(self, msg): + raise CmdlnUserError(msg) + + +def option(*args, **kwargs): + """Decorator to add an option to the optparser argument of a Cmdln + subcommand. 
+ + Example: + class MyShell(cmdln.Cmdln): + @cmdln.option("-f", "--force", help="force removal") + def do_remove(self, subcmd, opts, *args): + #... + """ + #XXX Is there a possible optimization for many options to not have a + # large stack depth here? + def decorate(f): + if not hasattr(f, "optparser"): + f.optparser = SubCmdOptionParser() + f.optparser.add_option(*args, **kwargs) + return f + return decorate + +def hide(*args): + """For obsolete calls, hide them in help listings. + + Example: + class MyShell(cmdln.Cmdln): + @cmdln.hide() + def do_shell(self, argv): + #...implement 'shell' command + """ + def decorate(f): + f.hidden = 1 + return f + return decorate + + +class Cmdln(RawCmdln): + """An improved (on cmd.Cmd) framework for building multi-subcommand + scripts (think "svn" & "cvs") and simple shells (think "pdb" and + "gdb"). + + A simple example: + + import cmdln + + class MySVN(cmdln.Cmdln): + name = "svn" + + @cmdln.aliases('stat', 'st') + @cmdln.option('-v', '--verbose', action='store_true' + help='print verbose information') + def do_status(self, subcmd, opts, *paths): + print "handle 'svn status' command" + + #... + + if __name__ == "__main__": + shell = MySVN() + retval = shell.main() + sys.exit(retval) + + 'Cmdln' extends 'RawCmdln' by providing optparse option processing + integration. See this class' _dispatch_cmd() docstring and + <http://trentm.com/projects/cmdln> for more information. + """ + def _dispatch_cmd(self, handler, argv): + """Introspect sub-command handler signature to determine how to + dispatch the command. The raw handler provided by the base + 'RawCmdln' class is still supported: + + def do_foo(self, argv): + # 'argv' is the vector of command line args, argv[0] is + # the command name itself (i.e. 
"foo" or an alias) + pass + + In addition, if the handler has more than 2 arguments option + processing is automatically done (using optparse): + + @cmdln.option('-v', '--verbose', action='store_true') + def do_bar(self, subcmd, opts, *args): + # subcmd = <"bar" or an alias> + # opts = <an optparse.Values instance> + if opts.verbose: + print "lots of debugging output..." + # args = <tuple of arguments> + for arg in args: + bar(arg) + + TODO: explain that "*args" can be other signatures as well. + + The `cmdln.option` decorator corresponds to an `add_option()` + method call on an `optparse.OptionParser` instance. + + You can declare a specific number of arguments: + + @cmdln.option('-v', '--verbose', action='store_true') + def do_bar2(self, subcmd, opts, bar_one, bar_two): + #... + + and an appropriate error message will be raised/printed if the + command is called with a different number of args. + """ + co_argcount = handler.im_func.func_code.co_argcount + if co_argcount == 2: # handler ::= do_foo(self, argv) + return handler(argv) + elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...) + try: + optparser = handler.optparser + except AttributeError: + optparser = handler.im_func.optparser = SubCmdOptionParser() + assert isinstance(optparser, SubCmdOptionParser) + optparser.set_cmdln_info(self, argv[0]) + try: + opts, args = optparser.parse_args(argv[1:]) + except StopOptionProcessing: + #TODO: this doesn't really fly for a replacement of + # optparse.py behaviour, does it? + return 0 # Normal command termination + + try: + return handler(argv[0], opts, *args) + except TypeError, ex: + # Some TypeError's are user errors: + # do_foo() takes at least 4 arguments (3 given) + # do_foo() takes at most 5 arguments (6 given) + # do_foo() takes exactly 5 arguments (6 given) + # Raise CmdlnUserError for these with a suitably + # massaged error message. 
+ import sys + tb = sys.exc_info()[2] # the traceback object + if tb.tb_next is not None: + # If the traceback is more than one level deep, then the + # TypeError do *not* happen on the "handler(...)" call + # above. In that we don't want to handle it specially + # here: it would falsely mask deeper code errors. + raise + msg = ex.args[0] + match = _INCORRECT_NUM_ARGS_RE.search(msg) + if match: + msg = list(match.groups()) + msg[1] = int(msg[1]) - 3 + if msg[1] == 1: + msg[2] = msg[2].replace("arguments", "argument") + msg[3] = int(msg[3]) - 3 + msg = ''.join(map(str, msg)) + raise CmdlnUserError(msg) + else: + raise + else: + raise CmdlnError("incorrect argcount for %s(): takes %d, must " + "take 2 for 'argv' signature or 3+ for 'opts' " + "signature" % (handler.__name__, co_argcount)) + + + +#---- internal support functions + +def _format_linedata(linedata, indent, indent_width): + """Format specific linedata into a pleasant layout. + + "linedata" is a list of 2-tuples of the form: + (<item-display-string>, <item-docstring>) + "indent" is a string to use for one level of indentation + "indent_width" is a number of columns by which the + formatted data will be indented when printed. + + The <item-display-string> column is held to 15 columns. + """ + lines = [] + WIDTH = 78 - indent_width + SPACING = 3 + MAX_NAME_WIDTH = 15 + + NAME_WIDTH = min(max([len(s) for s,d in linedata]), MAX_NAME_WIDTH) + DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING + for namestr, doc in linedata: + line = indent + namestr + if len(namestr) <= NAME_WIDTH: + line += ' ' * (NAME_WIDTH + SPACING - len(namestr)) + else: + lines.append(line) + line = indent + ' ' * (NAME_WIDTH + SPACING) + line += _summarize_doc(doc, DOC_WIDTH) + lines.append(line.rstrip()) + return lines + +def _summarize_doc(doc, length=60): + r"""Parse out a short one line summary from the given doclines. + + "doc" is the doc string to summarize. 
+ "length" is the max length for the summary + + >>> _summarize_doc("this function does this") + 'this function does this' + >>> _summarize_doc("this function does this", 10) + 'this fu...' + >>> _summarize_doc("this function does this\nand that") + 'this function does this and that' + >>> _summarize_doc("this function does this\n\nand that") + 'this function does this' + """ + import re + if doc is None: + return "" + assert length > 3, "length <= 3 is absurdly short for a doc summary" + doclines = doc.strip().splitlines(0) + if not doclines: + return "" + + summlines = [] + for i, line in enumerate(doclines): + stripped = line.strip() + if not stripped: + break + summlines.append(stripped) + if len(''.join(summlines)) >= length: + break + + summary = ' '.join(summlines) + if len(summary) > length: + summary = summary[:length-3] + "..." + return summary + + +def line2argv(line): + r"""Parse the given line into an argument vector. + + "line" is the line of input to parse. + + This may get niggly when dealing with quoting and escaping. The + current state of this parsing may not be completely thorough/correct + in this respect. + + >>> from cmdln import line2argv + >>> line2argv("foo") + ['foo'] + >>> line2argv("foo bar") + ['foo', 'bar'] + >>> line2argv("foo bar ") + ['foo', 'bar'] + >>> line2argv(" foo bar") + ['foo', 'bar'] + + Quote handling: + + >>> line2argv("'foo bar'") + ['foo bar'] + >>> line2argv('"foo bar"') + ['foo bar'] + >>> line2argv(r'"foo\"bar"') + ['foo"bar'] + >>> line2argv("'foo bar' spam") + ['foo bar', 'spam'] + >>> line2argv("'foo 'bar spam") + ['foo bar', 'spam'] + >>> line2argv("'foo") + Traceback (most recent call last): + ... + ValueError: command line is not terminated: unfinished single-quoted segment + >>> line2argv('"foo') + Traceback (most recent call last): + ... 
+ ValueError: command line is not terminated: unfinished double-quoted segment + >>> line2argv('some\tsimple\ttests') + ['some', 'simple', 'tests'] + >>> line2argv('a "more complex" test') + ['a', 'more complex', 'test'] + >>> line2argv('a more="complex test of " quotes') + ['a', 'more=complex test of ', 'quotes'] + >>> line2argv('a more" complex test of " quotes') + ['a', 'more complex test of ', 'quotes'] + >>> line2argv('an "embedded \\"quote\\""') + ['an', 'embedded "quote"'] + """ + import string + line = line.strip() + argv = [] + state = "default" + arg = None # the current argument being parsed + i = -1 + while 1: + i += 1 + if i >= len(line): break + ch = line[i] + + if ch == "\\": # escaped char always added to arg, regardless of state + if arg is None: arg = "" + i += 1 + arg += line[i] + continue + + if state == "single-quoted": + if ch == "'": + state = "default" + else: + arg += ch + elif state == "double-quoted": + if ch == '"': + state = "default" + else: + arg += ch + elif state == "default": + if ch == '"': + if arg is None: arg = "" + state = "double-quoted" + elif ch == "'": + if arg is None: arg = "" + state = "single-quoted" + elif ch in string.whitespace: + if arg is not None: + argv.append(arg) + arg = None + else: + if arg is None: arg = "" + arg += ch + if arg is not None: + argv.append(arg) + if state != "default": + raise ValueError("command line is not terminated: unfinished %s " + "segment" % state) + return argv + + +def argv2line(argv): + r"""Put together the given argument vector into a command line. + + "argv" is the argument vector to process. 
+ + >>> from cmdln import argv2line + >>> argv2line(['foo']) + 'foo' + >>> argv2line(['foo', 'bar']) + 'foo bar' + >>> argv2line(['foo', 'bar baz']) + 'foo "bar baz"' + >>> argv2line(['foo"bar']) + 'foo"bar' + >>> print argv2line(['foo" bar']) + 'foo" bar' + >>> print argv2line(["foo' bar"]) + "foo' bar" + >>> argv2line(["foo'bar"]) + "foo'bar" + """ + escapedArgs = [] + for arg in argv: + if ' ' in arg and '"' not in arg: + arg = '"'+arg+'"' + elif ' ' in arg and "'" not in arg: + arg = "'"+arg+"'" + elif ' ' in arg: + arg = arg.replace('"', r'\"') + arg = '"'+arg+'"' + escapedArgs.append(arg) + return ' '.join(escapedArgs) + + +# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook +def _dedentlines(lines, tabsize=8, skip_first_line=False): + """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines + + "lines" is a list of lines to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + Same as dedent() except operates on a sequence of lines. Note: the + lines list is modified **in-place**. 
+ """ + DEBUG = False + if DEBUG: + print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\ + % (tabsize, skip_first_line) + indents = [] + margin = None + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + indent = 0 + for ch in line: + if ch == ' ': + indent += 1 + elif ch == '\t': + indent += tabsize - (indent % tabsize) + elif ch in '\r\n': + continue # skip all-whitespace lines + else: + break + else: + continue # skip all-whitespace lines + if DEBUG: print "dedent: indent=%d: %r" % (indent, line) + if margin is None: + margin = indent + else: + margin = min(margin, indent) + if DEBUG: print "dedent: margin=%r" % margin + + if margin is not None and margin > 0: + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + removed = 0 + for j, ch in enumerate(line): + if ch == ' ': + removed += 1 + elif ch == '\t': + removed += tabsize - (removed % tabsize) + elif ch in '\r\n': + if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line + lines[i] = lines[i][j:] + break + else: + raise ValueError("unexpected non-whitespace char %r in " + "line %r while removing %d-space margin" + % (ch, line, margin)) + if DEBUG: + print "dedent: %r: %r -> removed %d/%d"\ + % (line, ch, removed, margin) + if removed == margin: + lines[i] = lines[i][j+1:] + break + elif removed > margin: + lines[i] = ' '*(removed-margin) + lines[i][j+1:] + break + return lines + +def _dedent(text, tabsize=8, skip_first_line=False): + """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text + + "text" is the text to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. 
+ + textwrap.dedent(s), but don't expand tabs to spaces + """ + lines = text.splitlines(1) + _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line) + return ''.join(lines) + + +def _get_indent(marker, s, tab_width=8): + """_get_indent(marker, s, tab_width=8) -> + (<indentation-of-'marker'>, <indentation-width>)""" + # Figure out how much the marker is indented. + INDENT_CHARS = tuple(' \t') + start = s.index(marker) + i = start + while i > 0: + if s[i-1] not in INDENT_CHARS: + break + i -= 1 + indent = s[i:start] + indent_width = 0 + for ch in indent: + if ch == ' ': + indent_width += 1 + elif ch == '\t': + indent_width += tab_width - (indent_width % tab_width) + return indent, indent_width + +def _get_trailing_whitespace(marker, s): + """Return the whitespace content trailing the given 'marker' in string 's', + up to and including a newline. + """ + suffix = '' + start = s.index(marker) + len(marker) + i = start + while i < len(s): + if s[i] in ' \t': + suffix += s[i] + elif s[i] in '\r\n': + suffix += s[i] + if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n': + suffix += s[i+1] + break + else: + break + i += 1 + return suffix + diff --git a/micng/utils/error.py b/micng/utils/error.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/micng/utils/error.py diff --git a/micng/utils/errors.py b/micng/utils/errors.py new file mode 100644 index 0000000..ba08563 --- /dev/null +++ b/micng/utils/errors.py @@ -0,0 +1,31 @@ +# +# errors.py : exception definitions +# +# Copyright 2007, Red Hat Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +class CreatorError(Exception): + """An exception base class for all imgcreate errors.""" + def __init__(self, msg): + Exception.__init__(self, msg) + +class KickstartError(CreatorError): + pass +class MountError(CreatorError): + pass +class SnapshotError(CreatorError): + pass +class SquashfsError(CreatorError): + pass diff --git a/micng/utils/fs_related.py b/micng/utils/fs_related.py new file mode 100644 index 0000000..ff2a07a --- /dev/null +++ b/micng/utils/fs_related.py @@ -0,0 +1,945 @@ +# +# fs.py : Filesystem related utilities and classes +# +# Copyright 2007, Red Hat Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ + +import os +import sys +import errno +import stat +import subprocess +import random +import string +import logging +import time +import fcntl +import struct +import termios + +from errors import * +from urlgrabber.grabber import URLGrabber +from urlgrabber.grabber import URLGrabError + +def terminal_width(fd=1): + """ Get the real terminal width """ + try: + buf = 'abcdefgh' + buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf) + return struct.unpack('hhhh', buf)[1] + except: # IOError + return 80 + +def truncate_url(url, width): + if len(url) > width: + return os.path.basename(url)[0:width] + return url + +class TextProgress(object): + def start(self, filename, url, *args, **kwargs): + self.url = url + self.termwidth = terminal_width() + sys.stdout.write("Retrieving %s " % truncate_url(self.url, self.termwidth - 17)) + sys.stdout.flush() + self.indicators = ["-", "\\", "|", "/"] + self.counter = 0 + def update(self, *args): + if sys.stdout.isatty(): + sys.stdout.write("\rRetrieving %s %s" % (truncate_url(self.url, self.termwidth - 17), self.indicators[self.counter%4])) + sys.stdout.flush() + self.counter += 1 + else: + pass + def end(self, *args): + if sys.stdout.isatty(): + sys.stdout.write("\rRetrieving %s ...OK\n" % (self.url,)) + else: + sys.stdout.write("...OK\n") + sys.stdout.flush() + +def find_binary_path(binary): + if os.environ.has_key("PATH"): + paths = os.environ["PATH"].split(":") + else: + paths = [] + if os.environ.has_key("HOME"): + paths += [os.environ["HOME"] + "/bin"] + paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"] + + for path in paths: + bin_path = "%s/%s" % (path, binary) + if os.path.exists(bin_path): + return bin_path + raise CreatorError("Command '%s' is not available." % binary) + +def makedirs(dirname): + """A version of os.makedirs() that doesn't throw an + exception if the leaf directory already exists. 
+ """ + try: + os.makedirs(dirname) + except OSError, (err, msg): + if err != errno.EEXIST: + raise + +def mksquashfs(in_img, out_img): + fullpathmksquashfs = find_binary_path("mksquashfs") + args = [fullpathmksquashfs, in_img, out_img] + + if not sys.stdout.isatty(): + args.append("-no-progress") + + ret = subprocess.call(args, stdout=sys.stdout, stderr=sys.stderr) + if ret != 0: + raise SquashfsError("'%s' exited with error (%d)" % + (string.join(args, " "), ret)) + +def resize2fs(fs, size): + dev_null = os.open("/dev/null", os.O_WRONLY) + try: + resize2fs = find_binary_path("resize2fs") + return subprocess.call([resize2fs, fs, "%sK" % (size / 1024,)], + stdout = dev_null, stderr = dev_null) + finally: + os.close(dev_null) + +def my_fuser(file): + ret = False + fuser = find_binary_path("fuser") + if not os.path.exists(file): + return ret + dev_null = os.open("/dev/null", os.O_WRONLY) + rc = subprocess.call([fuser, "-s", file], stderr=dev_null) + if rc == 0: + fuser_proc = subprocess.Popen([fuser, file], stdout=subprocess.PIPE, stderr=dev_null) + pids = fuser_proc.communicate()[0].strip().split() + for pid in pids: + fd = open("/proc/%s/cmdline" % pid, "r") + cmdline = fd.read() + fd.close() + if cmdline[:-1] == "/bin/bash": + ret = True + break + os.close(dev_null) + return ret + +class BindChrootMount: + """Represents a bind mount of a directory into a chroot.""" + def __init__(self, src, chroot, dest = None, option = None): + self.src = src + self.root = os.path.abspath(os.path.expanduser(chroot)) + self.option = option + + if not dest: + dest = src + self.dest = self.root + "/" + dest + + self.mounted = False + self.mountcmd = find_binary_path("mount") + self.umountcmd = find_binary_path("umount") + + def ismounted(self): + ret = False + dev_null = os.open("/dev/null", os.O_WRONLY) + catcmd = find_binary_path("cat") + args = [ catcmd, "/proc/mounts" ] + proc_mounts = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=dev_null) + outputs = 
proc_mounts.communicate()[0].strip().split("\n") + for line in outputs: + if line.split()[1] == os.path.abspath(self.dest): + ret = True + break + os.close(dev_null) + return ret + + def has_chroot_instance(self): + lock = os.path.join(self.root, ".chroot.lock") + return my_fuser(lock) + + def mount(self): + if self.mounted or self.ismounted(): + return + + makedirs(self.dest) + rc = subprocess.call([self.mountcmd, "--bind", self.src, self.dest]) + if rc != 0: + raise MountError("Bind-mounting '%s' to '%s' failed" % + (self.src, self.dest)) + if self.option: + rc = subprocess.call([self.mountcmd, "-o", "remount,%s" % self.option, self.dest]) + if rc != 0: + raise MountError("Bind-remounting '%s' failed" % self.dest) + self.mounted = True + + def unmount(self): + if self.has_chroot_instance(): + return + + if self.ismounted(): + subprocess.call([self.umountcmd, "-l", self.dest]) + self.mounted = False + +class LoopbackMount: + """LoopbackMount compatibility layer for old API""" + def __init__(self, lofile, mountdir, fstype = None): + self.diskmount = DiskMount(LoopbackDisk(lofile,size = 0),mountdir,fstype,rmmountdir = True) + self.losetup = False + self.losetupcmd = find_binary_path("losetup") + + def cleanup(self): + self.diskmount.cleanup() + + def unmount(self): + self.diskmount.unmount() + + def lounsetup(self): + if self.losetup: + rc = subprocess.call([self.losetupcmd, "-d", self.loopdev]) + self.losetup = False + self.loopdev = None + + def loopsetup(self): + if self.losetup: + return + + losetupProc = subprocess.Popen([self.losetupcmd, "-f"], + stdout=subprocess.PIPE) + losetupOutput = losetupProc.communicate()[0] + + if losetupProc.returncode: + raise MountError("Failed to allocate loop device for '%s'" % + self.lofile) + + self.loopdev = losetupOutput.split()[0] + + rc = subprocess.call([self.losetupcmd, self.loopdev, self.lofile]) + if rc != 0: + raise MountError("Failed to allocate loop device for '%s'" % + self.lofile) + + self.losetup = True + + def 
mount(self): + self.diskmount.mount() + +class SparseLoopbackMount(LoopbackMount): + """SparseLoopbackMount compatibility layer for old API""" + def __init__(self, lofile, mountdir, size, fstype = None): + self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True) + + def expand(self, create = False, size = None): + self.diskmount.disk.expand(create, size) + + def truncate(self, size = None): + self.diskmount.disk.truncate(size) + + def create(self): + self.diskmount.disk.create() + +class SparseExtLoopbackMount(SparseLoopbackMount): + """SparseExtLoopbackMount compatibility layer for old API""" + def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel): + self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True) + + + def __format_filesystem(self): + self.diskmount.__format_filesystem() + + def create(self): + self.diskmount.disk.create() + + def resize(self, size = None): + return self.diskmount.__resize_filesystem(size) + + def mount(self): + self.diskmount.mount() + + def __fsck(self): + self.extdiskmount.__fsck() + + def __get_size_from_filesystem(self): + return self.diskmount.__get_size_from_filesystem() + + def __resize_to_minimal(self): + return self.diskmount.__resize_to_minimal() + + def resparse(self, size = None): + return self.diskmount.resparse(size) + +class Disk: + """Generic base object for a disk + + The 'create' method must make the disk visible as a block device - eg + by calling losetup. For RawDisk, this is obviously a no-op. The 'cleanup' + method must undo the 'create' operation. 
+ """ + def __init__(self, size, device = None): + self._device = device + self._size = size + + def create(self): + pass + + def cleanup(self): + pass + + def get_device(self): + return self._device + def set_device(self, path): + self._device = path + device = property(get_device, set_device) + + def get_size(self): + return self._size + size = property(get_size) + + +class RawDisk(Disk): + """A Disk backed by a block device. + Note that create() is a no-op. + """ + def __init__(self, size, device): + Disk.__init__(self, size, device) + + def fixed(self): + return True + + def exists(self): + return True + +class LoopbackDisk(Disk): + """A Disk backed by a file via the loop module.""" + def __init__(self, lofile, size): + Disk.__init__(self, size) + self.lofile = lofile + self.losetupcmd = find_binary_path("losetup") + + def fixed(self): + return False + + def exists(self): + return os.path.exists(self.lofile) + + def create(self): + if self.device is not None: + return + + losetupProc = subprocess.Popen([self.losetupcmd, "-f"], + stdout=subprocess.PIPE) + losetupOutput = losetupProc.communicate()[0] + + if losetupProc.returncode: + raise MountError("Failed to allocate loop device for '%s'" % + self.lofile) + + device = losetupOutput.split()[0] + + logging.debug("Losetup add %s mapping to %s" % (device, self.lofile)) + rc = subprocess.call([self.losetupcmd, device, self.lofile]) + if rc != 0: + raise MountError("Failed to allocate loop device for '%s'" % + self.lofile) + self.device = device + + def cleanup(self): + if self.device is None: + return + logging.debug("Losetup remove %s" % self.device) + rc = subprocess.call([self.losetupcmd, "-d", self.device]) + self.device = None + + + +class SparseLoopbackDisk(LoopbackDisk): + """A Disk backed by a sparse file via the loop module.""" + def __init__(self, lofile, size): + LoopbackDisk.__init__(self, lofile, size) + + def expand(self, create = False, size = None): + flags = os.O_WRONLY + if create: + flags |= 
os.O_CREAT + if not os.path.exists(self.lofile): + makedirs(os.path.dirname(self.lofile)) + + if size is None: + size = self.size + + logging.debug("Extending sparse file %s to %d" % (self.lofile, size)) + if create: + fd = os.open(self.lofile, flags, 0644) + else: + fd = os.open(self.lofile, flags) + + os.lseek(fd, size, os.SEEK_SET) + os.write(fd, '\x00') + os.close(fd) + + def truncate(self, size = None): + if size is None: + size = self.size + + logging.debug("Truncating sparse file %s to %d" % (self.lofile, size)) + fd = os.open(self.lofile, os.O_WRONLY) + os.ftruncate(fd, size) + os.close(fd) + + def create(self): + self.expand(create = True) + LoopbackDisk.create(self) + +class Mount: + """A generic base class to deal with mounting things.""" + def __init__(self, mountdir): + self.mountdir = mountdir + + def cleanup(self): + self.unmount() + + def mount(self, options = None): + pass + + def unmount(self): + pass + +class DiskMount(Mount): + """A Mount object that handles mounting of a Disk.""" + def __init__(self, disk, mountdir, fstype = None, rmmountdir = True): + Mount.__init__(self, mountdir) + + self.disk = disk + self.fstype = fstype + self.rmmountdir = rmmountdir + + self.mounted = False + self.rmdir = False + if fstype: + self.mkfscmd = find_binary_path("mkfs." 
+ self.fstype) + else: + self.mkfscmd = None + self.mountcmd = find_binary_path("mount") + self.umountcmd = find_binary_path("umount") + + def cleanup(self): + Mount.cleanup(self) + self.disk.cleanup() + + def unmount(self): + if self.mounted: + logging.debug("Unmounting directory %s" % self.mountdir) + synccmd = find_binary_path("sync") + subprocess.call([synccmd]) # sync the data on this mount point + rc = subprocess.call([self.umountcmd, "-l", self.mountdir]) + if rc == 0: + self.mounted = False + else: + raise MountError("Failed to umount %s" % self.mountdir) + if self.rmdir and not self.mounted: + try: + os.rmdir(self.mountdir) + except OSError, e: + pass + self.rmdir = False + + + def __create(self): + self.disk.create() + + + def mount(self, options = None): + if self.mounted: + return + + if not os.path.isdir(self.mountdir): + logging.debug("Creating mount point %s" % self.mountdir) + os.makedirs(self.mountdir) + self.rmdir = self.rmmountdir + + self.__create() + + logging.debug("Mounting %s at %s" % (self.disk.device, self.mountdir)) + if options: + args = [ self.mountcmd, "-o", options, self.disk.device, self.mountdir ] + else: + args = [ self.mountcmd, self.disk.device, self.mountdir ] + if self.fstype: + args.extend(["-t", self.fstype]) + + rc = subprocess.call(args) + if rc != 0: + raise MountError("Failed to mount '%s' to '%s' with command '%s'. 
Retval: %s" % + (self.disk.device, self.mountdir, " ".join(args), rc)) + + self.mounted = True + +class ExtDiskMount(DiskMount): + """A DiskMount object that is able to format/resize ext[23] filesystems.""" + def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None): + DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir) + self.blocksize = blocksize + self.fslabel = fslabel.replace("/", "") + self.uuid = None + self.skipformat = skipformat + self.fsopts = fsopts + self.dumpe2fs = find_binary_path("dumpe2fs") + self.tune2fs = find_binary_path("tune2fs") + + def __parse_field(self, output, field): + for line in output.split("\n"): + if line.startswith(field + ":"): + return line[len(field) + 1:].strip() + + raise KeyError("Failed to find field '%s' in output" % field) + + def __format_filesystem(self): + if self.skipformat: + logging.debug("Skip filesystem format.") + return + logging.debug("Formating %s filesystem on %s" % (self.fstype, self.disk.device)) + rc = subprocess.call([self.mkfscmd, + "-F", "-L", self.fslabel, + "-m", "1", "-b", str(self.blocksize), + self.disk.device], stdout=sys.stdout, + stderr=sys.stderr) + # str(self.disk.size / self.blocksize)]) + if rc != 0: + raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device)) + + dev_null = os.open("/dev/null", os.O_WRONLY) + try: + out = subprocess.Popen([self.dumpe2fs, '-h', self.disk.device], + stdout = subprocess.PIPE, + stderr = dev_null).communicate()[0] + finally: + os.close(dev_null) + + self.uuid = self.__parse_field(out, "Filesystem UUID") + logging.debug("Tuning filesystem on %s" % self.disk.device) + subprocess.call([self.tune2fs, "-c0", "-i0", "-Odir_index", + "-ouser_xattr,acl", self.disk.device], + stdout=sys.stdout, stderr=sys.stderr) + + def __resize_filesystem(self, size = None): + current_size = os.stat(self.disk.lofile)[stat.ST_SIZE] + + if size is None: + size = self.disk.size + + if size 
== current_size: + return + + if size > current_size: + self.disk.expand(size) + + self.__fsck() + + resize2fs(self.disk.lofile, size) + return size + + def __create(self): + resize = False + if not self.disk.fixed() and self.disk.exists(): + resize = True + + self.disk.create() + + if resize: + self.__resize_filesystem() + else: + self.__format_filesystem() + + def mount(self, options = None): + self.__create() + DiskMount.mount(self, options) + + def __fsck(self): + logging.debug("Checking filesystem %s" % self.disk.lofile) + subprocess.call(["/sbin/e2fsck", "-f", "-y", self.disk.lofile], stdout=sys.stdout, stderr=sys.stderr) + + def __get_size_from_filesystem(self): + dev_null = os.open("/dev/null", os.O_WRONLY) + try: + out = subprocess.Popen([self.dumpe2fs, '-h', self.disk.lofile], + stdout = subprocess.PIPE, + stderr = dev_null).communicate()[0] + finally: + os.close(dev_null) + + return int(self.__parse_field(out, "Block count")) * self.blocksize + + def __resize_to_minimal(self): + self.__fsck() + + # + # Use a binary search to find the minimal size + # we can resize the image to + # + bot = 0 + top = self.__get_size_from_filesystem() + while top != (bot + 1): + t = bot + ((top - bot) / 2) + + if not resize2fs(self.disk.lofile, t): + top = t + else: + bot = t + return top + + def resparse(self, size = None): + self.cleanup() + minsize = self.__resize_to_minimal() + self.disk.truncate(minsize) + self.__resize_filesystem(size) + return minsize + +class VfatDiskMount(DiskMount): + """A DiskMount object that is able to format vfat/msdos filesystems.""" + def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None): + DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir) + self.blocksize = blocksize + self.fslabel = fslabel.replace("/", "") + self.uuid = "%08X" % int(time.time()) + self.skipformat = skipformat + self.fsopts = fsopts + self.fsckcmd = find_binary_path("fsck." 
+ self.fstype) + + def __format_filesystem(self): + if self.skipformat: + logging.debug("Skip filesystem format.") + return + logging.debug("Formating %s filesystem on %s" % (self.fstype, self.disk.device)) + blah = [self.mkfscmd, "-n", self.fslabel, "-i", self.uuid, self.disk.device] + rc = subprocess.call(blah) + if rc != 0: + raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device)) + logging.debug("Tuning filesystem on %s" % self.disk.device) + + def __resize_filesystem(self, size = None): + current_size = os.stat(self.disk.lofile)[stat.ST_SIZE] + + if size is None: + size = self.disk.size + + if size == current_size: + return + + if size > current_size: + self.disk.expand(size) + + self.__fsck() + + #resize2fs(self.disk.lofile, size) + return size + + def __create(self): + resize = False + if not self.disk.fixed() and self.disk.exists(): + resize = True + + self.disk.create() + + if resize: + self.__resize_filesystem() + else: + self.__format_filesystem() + + def mount(self, options = None): + self.__create() + DiskMount.mount(self, options) + + def __fsck(self): + logging.debug("Checking filesystem %s" % self.disk.lofile) + subprocess.call([self.fsckcmd, "-y", self.disk.lofile]) + + def __get_size_from_filesystem(self): + return self.disk.size + + def __resize_to_minimal(self): + self.__fsck() + + # + # Use a binary search to find the minimal size + # we can resize the image to + # + bot = 0 + top = self.__get_size_from_filesystem() + return top + + def resparse(self, size = None): + self.cleanup() + minsize = self.__resize_to_minimal() + self.disk.truncate(minsize) + self.__resize_filesystem(size) + return minsize + +class BtrfsDiskMount(DiskMount): + """A DiskMount object that is able to format/resize btrfs filesystems.""" + def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None): + self.__check_btrfs() + DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir) 
+ self.blocksize = blocksize + self.fslabel = fslabel.replace("/", "") + self.uuid = None + self.skipformat = skipformat + self.fsopts = fsopts + self.blkidcmd = find_binary_path("blkid") + self.btrfsckcmd = find_binary_path("btrfsck") + + def __check_btrfs(self): + found = False + """ Need to load btrfs module to mount it """ + load_module("btrfs") + for line in open("/proc/filesystems").xreadlines(): + if line.find("btrfs") > -1: + found = True + break + if not found: + raise MountError("Your system can't mount btrfs filesystem, please make sure your kernel has btrfs support and the module btrfs.ko has been loaded.") + + # disable selinux, selinux will block write + if os.path.exists("/usr/sbin/setenforce"): + subprocess.call(["/usr/sbin/setenforce", "0"]) + + def __parse_field(self, output, field): + for line in output.split(" "): + if line.startswith(field + "="): + return line[len(field) + 1:].strip().replace("\"", "") + + raise KeyError("Failed to find field '%s' in output" % field) + + def __format_filesystem(self): + if self.skipformat: + logging.debug("Skip filesystem format.") + return + logging.debug("Formating %s filesystem on %s" % (self.fstype, self.disk.device)) + rc = subprocess.call([self.mkfscmd, "-L", self.fslabel, self.disk.device]) + if rc != 0: + raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device)) + + dev_null = os.open("/dev/null", os.O_WRONLY) + try: + out = subprocess.Popen([self.blkidcmd, self.disk.device], + stdout = subprocess.PIPE, + stderr = dev_null).communicate()[0] + finally: + os.close(dev_null) + + self.uuid = self.__parse_field(out, "UUID") + + def __resize_filesystem(self, size = None): + current_size = os.stat(self.disk.lofile)[stat.ST_SIZE] + + if size is None: + size = self.disk.size + + if size == current_size: + return + + if size > current_size: + self.disk.expand(size) + + self.__fsck() + return size + + def __create(self): + resize = False + if not self.disk.fixed() and 
self.disk.exists(): + resize = True + + self.disk.create() + + if resize: + self.__resize_filesystem() + else: + self.__format_filesystem() + + def mount(self, options = None): + self.__create() + DiskMount.mount(self, options) + + def __fsck(self): + logging.debug("Checking filesystem %s" % self.disk.lofile) + subprocess.call([self.btrfsckcmd, self.disk.lofile]) + + def __get_size_from_filesystem(self): + return self.disk.size + + def __resize_to_minimal(self): + self.__fsck() + + return self.__get_size_from_filesystem() + + def resparse(self, size = None): + self.cleanup() + minsize = self.__resize_to_minimal() + self.disk.truncate(minsize) + self.__resize_filesystem(size) + return minsize + +class DeviceMapperSnapshot(object): + def __init__(self, imgloop, cowloop): + self.imgloop = imgloop + self.cowloop = cowloop + + self.__created = False + self.__name = None + self.dmsetupcmd = find_binary_path("dmsetup") + + """Load dm_snapshot if it isn't loaded""" + load_module("dm_snapshot") + + def get_path(self): + if self.__name is None: + return None + return os.path.join("/dev/mapper", self.__name) + path = property(get_path) + + def create(self): + if self.__created: + return + + self.imgloop.create() + self.cowloop.create() + + self.__name = "imgcreate-%d-%d" % (os.getpid(), + random.randint(0, 2**16)) + + size = os.stat(self.imgloop.lofile)[stat.ST_SIZE] + + table = "0 %d snapshot %s %s p 8" % (size / 512, + self.imgloop.device, + self.cowloop.device) + + args = [self.dmsetupcmd, "create", self.__name, "--table", table] + if subprocess.call(args) != 0: + self.cowloop.cleanup() + self.imgloop.cleanup() + raise SnapshotError("Could not create snapshot device using: " + + string.join(args, " ")) + + self.__created = True + + def remove(self, ignore_errors = False): + if not self.__created: + return + + time.sleep(2) + rc = subprocess.call([self.dmsetupcmd, "remove", self.__name]) + if not ignore_errors and rc != 0: + raise SnapshotError("Could not remove snapshot 
device") + + self.__name = None + self.__created = False + + self.cowloop.cleanup() + self.imgloop.cleanup() + + def get_cow_used(self): + if not self.__created: + return 0 + + dev_null = os.open("/dev/null", os.O_WRONLY) + try: + out = subprocess.Popen([self.dmsetupcmd, "status", self.__name], + stdout = subprocess.PIPE, + stderr = dev_null).communicate()[0] + finally: + os.close(dev_null) + + # + # dmsetup status on a snapshot returns e.g. + # "0 8388608 snapshot 416/1048576" + # or, more generally: + # "A B snapshot C/D" + # where C is the number of 512 byte sectors in use + # + try: + return int((out.split()[3]).split('/')[0]) * 512 + except ValueError: + raise SnapshotError("Failed to parse dmsetup status: " + out) + +def create_image_minimizer(path, image, minimal_size): + """ + Builds a copy-on-write image which can be used to + create a device-mapper snapshot of an image where + the image's filesystem is as small as possible + + The steps taken are: + 1) Create a sparse COW + 2) Loopback mount the image and the COW + 3) Create a device-mapper snapshot of the image + using the COW + 4) Resize the filesystem to the minimal size + 5) Determine the amount of space used in the COW + 6) Restroy the device-mapper snapshot + 7) Truncate the COW, removing unused space + 8) Create a squashfs of the COW + """ + imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter + + cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"), + 64L * 1024L * 1024L) + + snapshot = DeviceMapperSnapshot(imgloop, cowloop) + + try: + snapshot.create() + + resize2fs(snapshot.path, minimal_size) + + cow_used = snapshot.get_cow_used() + finally: + snapshot.remove(ignore_errors = (not sys.exc_info()[0] is None)) + + cowloop.truncate(cow_used) + + mksquashfs(cowloop.lofile, path) + + os.unlink(cowloop.lofile) + +def load_module(module): + found = False + for line in open('/proc/modules').xreadlines(): + if line.startswith("%s " % module): + found = True + 
break + if not found: + print "Loading %s..." % module + dev_null = os.open("/dev/null", os.O_WRONLY) + modprobecmd = find_binary_path("modprobe") + modprobe = subprocess.Popen([modprobecmd, module], + stdout=dev_null, stderr=dev_null) + os.waitpid(modprobe.pid, 0) + os.close(dev_null) + +def myurlgrab(url, filename, proxies): + g = URLGrabber() + if url.startswith("file:///"): + file = url.replace("file://", "") + if not os.path.exists(file): + raise CreatorError("URLGrabber error: can't find file %s" % file) + copycmd = find_binary_path("cp") + subprocess.call([copycmd, "-f", file, filename]) + else: + try: + filename = g.urlgrab(url = url, filename = filename, + ssl_verify_host = False, ssl_verify_peer = False, + proxies = proxies, http_headers = (('Pragma', 'no-cache'),)) + except URLGrabError, e: + raise CreatorError("URLGrabber error: %s: %s" % (e, url)) + except: + raise CreatorError("URLGrabber error: %s" % url) + return filename diff --git a/micng/utils/kickstart.py b/micng/utils/kickstart.py new file mode 100644 index 0000000..d6ebcd8 --- /dev/null +++ b/micng/utils/kickstart.py @@ -0,0 +1,815 @@ +# +# kickstart.py : Apply kickstart configuration to a system +# +# Copyright 2007, Red Hat Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ +import os +import os.path +import shutil +import subprocess +import time +import logging +import string + +#import rhpl.keyboard + +import pykickstart.commands as kscommands +import pykickstart.constants as ksconstants +import pykickstart.errors as kserrors +import pykickstart.parser as ksparser +import pykickstart.version as ksversion +from pykickstart.handlers.control import commandMap +from pykickstart.handlers.control import dataMap + +import errors as errors +import fs_related as fs +import kscommands.desktop as desktop +import kscommands.moblinrepo as moblinrepo +import kscommands.micboot as micboot + +import sys +sys.path.append("~/0509/mic") +import misc as misc + +def read_kickstart(path): + """Parse a kickstart file and return a KickstartParser instance. + + This is a simple utility function which takes a path to a kickstart file, + parses it and returns a pykickstart KickstartParser instance which can + be then passed to an ImageCreator constructor. + + If an error occurs, a CreatorError exception is thrown. 
+ + """ + #version = ksversion.makeVersion() + #ks = ksparser.KickstartParser(version) + + using_version = ksversion.DEVEL + commandMap[using_version]["desktop"] = desktop.Moblin_Desktop + commandMap[using_version]["repo"] = moblinrepo.Moblin_Repo + commandMap[using_version]["bootloader"] = micboot.Moblin_Bootloader + dataMap[using_version]["RepoData"] = moblinrepo.Moblin_RepoData + superclass = ksversion.returnClassForVersion(version=using_version) + + class KSHandlers(superclass): + def __init__(self, mapping={}): + superclass.__init__(self, mapping=commandMap[using_version]) + + ks = ksparser.KickstartParser(KSHandlers()) + + try: + ks.readKickstart(path) + except IOError, (err, msg): + raise errors.KickstartError("Failed to read kickstart file " + "'%s' : %s" % (path, msg)) + except kserrors.KickstartError, e: + raise errors.KickstartError("Failed to parse kickstart file " + "'%s' : %s" % (path, e)) + return ks + +def build_name(kscfg, prefix = None, suffix = None, maxlen = None): + """Construct and return an image name string. + + This is a utility function to help create sensible name and fslabel + strings. The name is constructed using the sans-prefix-and-extension + kickstart filename and the supplied prefix and suffix. + + If the name exceeds the maxlen length supplied, the prefix is first dropped + and then the kickstart filename portion is reduced until it fits. In other + words, the suffix takes precedence over the kickstart portion and the + kickstart portion takes precedence over the prefix. + + kscfg -- a path to a kickstart file + prefix -- a prefix to prepend to the name; defaults to None, which causes + no prefix to be used + suffix -- a suffix to append to the name; defaults to None, which causes + a YYYYMMDDHHMM suffix to be used + maxlen -- the maximum length for the returned string; defaults to None, + which means there is no restriction on the name length + + Note, if maxlen is less then the len(suffix), you get to keep both pieces. 
+ + """ + name = os.path.basename(kscfg) + idx = name.rfind('.') + if idx >= 0: + name = name[:idx] + + if prefix is None: + prefix = "" + if suffix is None: + suffix = time.strftime("%Y%m%d%H%M") + + if name.startswith(prefix): + name = name[len(prefix):] + + ret = prefix + name + "-" + suffix + if not maxlen is None and len(ret) > maxlen: + ret = name[:maxlen - len(suffix) - 1] + "-" + suffix + + return ret + +class KickstartConfig(object): + """A base class for applying kickstart configurations to a system.""" + def __init__(self, instroot): + self.instroot = instroot + + def path(self, subpath): + return self.instroot + subpath + + def chroot(self): + os.chroot(self.instroot) + os.chdir("/") + + def call(self, args): + if not os.path.exists("%s/%s" %(self.instroot, args[0])): + print "%s/%s" %(self.instroot, args[0]) + raise errors.KickstartError("Unable to run %s!" %(args)) + subprocess.call(args, preexec_fn = self.chroot) + + def apply(self): + pass + +class LanguageConfig(KickstartConfig): + """A class to apply a kickstart language configuration to a system.""" + def apply(self, kslang): + lang = kslang.lang or "en_US.UTF-8" + + f = open(self.path("/etc/sysconfig/i18n"), "w+") + f.write("LANG=\"" + lang + "\"\n") + f.close() + +class KeyboardConfig(KickstartConfig): + """A class to apply a kickstart keyboard configuration to a system.""" + def apply(self, kskeyboard): + # + # FIXME: + # should this impact the X keyboard config too? + # or do we want to make X be able to do this mapping? 
+ # + #k = rhpl.keyboard.Keyboard() + #if kskeyboard.keyboard: + # k.set(kskeyboard.keyboard) + #k.write(self.instroot) + pass + +class TimezoneConfig(KickstartConfig): + """A class to apply a kickstart timezone configuration to a system.""" + def apply(self, kstimezone): + tz = kstimezone.timezone or "America/New_York" + utc = str(kstimezone.isUtc) + + f = open(self.path("/etc/sysconfig/clock"), "w+") + f.write("ZONE=\"" + tz + "\"\n") + f.write("UTC=" + utc + "\n") + f.close() + try: + shutil.copyfile(self.path("/usr/share/zoneinfo/%s" %(tz,)), + self.path("/etc/localtime")) + except (IOError, OSError), (errno, msg): + raise errors.KickstartError("Error copying timezone info: %s" %(msg,)) + + +class AuthConfig(KickstartConfig): + """A class to apply a kickstart authconfig configuration to a system.""" + def apply(self, ksauthconfig): + auth = ksauthconfig.authconfig or "--useshadow --enablemd5" + args = ["/usr/share/authconfig/authconfig.py", "--update", "--nostart"] + self.call(args + auth.split()) + +class FirewallConfig(KickstartConfig): + """A class to apply a kickstart firewall configuration to a system.""" + def apply(self, ksfirewall): + # + # FIXME: should handle the rest of the options + # + if not os.path.exists(self.path("/usr/sbin/lokkit")): + return + if ksfirewall.enabled: + status = "--enabled" + else: + status = "--disabled" + + self.call(["/usr/sbin/lokkit", + "-f", "--quiet", "--nostart", status]) + +class RootPasswordConfig(KickstartConfig): + """A class to apply a kickstart root password configuration to a system.""" + def unset(self): + self.call(["/usr/bin/passwd", "-d", "root"]) + + def set_encrypted(self, password): + self.call(["/usr/sbin/usermod", "-p", password, "root"]) + + def set_unencrypted(self, password): + for p in ("/bin/echo", "/usr/sbin/chpasswd"): + if not os.path.exists("%s/%s" %(self.instroot, p)): + raise errors.KickstartError("Unable to set unencrypted password due to lack of %s" % p) + + p1 = 
subprocess.Popen(["/bin/echo", "root:%s" %password], + stdout = subprocess.PIPE, + preexec_fn = self.chroot) + p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"], + stdin = p1.stdout, + stdout = subprocess.PIPE, + preexec_fn = self.chroot) + p2.communicate() + + def apply(self, ksrootpw): + if ksrootpw.isCrypted: + self.set_encrypted(ksrootpw.password) + elif ksrootpw.password != "": + self.set_unencrypted(ksrootpw.password) + else: + self.unset() + +class UserConfig(KickstartConfig): + def set_empty_passwd(self, user): + self.call(["/usr/bin/passwd", "-d", user]) + + def set_encrypted_passwd(self, user, password): + self.call(["/usr/sbin/usermod", "-p", "%s" % password, user]) + + def set_unencrypted_passwd(self, user, password): + for p in ("/bin/echo", "/usr/sbin/chpasswd"): + if not os.path.exists("%s/%s" %(self.instroot, p)): + raise errors.KickstartError("Unable to set unencrypted password due to lack of %s" % p) + + p1 = subprocess.Popen(["/bin/echo", "%s:%s" %(user, password)], + stdout = subprocess.PIPE, + preexec_fn = self.chroot) + p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"], + stdin = p1.stdout, + stdout = subprocess.PIPE, + preexec_fn = self.chroot) + p2.communicate() + + def addUser(self, userconfig): + args = [ "/usr/sbin/useradd" ] + if userconfig.groups: + args += [ "--groups", string.join(userconfig.groups, ",") ] + if userconfig.name: + args.append(userconfig.name) + dev_null = os.open("/dev/null", os.O_WRONLY) + subprocess.call(args, + stdout = dev_null, + stderr = dev_null, + preexec_fn = self.chroot) + os.close(dev_null) + if userconfig.password not in (None, ""): + if userconfig.isCrypted: + self.set_encrypted_passwd(userconfig.name, userconfig.password) + else: + self.set_unencrypted_passwd(userconfig.name, userconfig.password) + else: + self.set_empty_passwd(userconfig.name) + else: + raise errors.KickstartError("Invalid kickstart command: %s" % userconfig.__str__()) + + def apply(self, user): + for userconfig in user.userList: + 
try: + self.addUser(userconfig) + except: + raise + +class ServicesConfig(KickstartConfig): + """A class to apply a kickstart services configuration to a system.""" + def apply(self, ksservices): + if not os.path.exists(self.path("/sbin/chkconfig")): + return + for s in ksservices.enabled: + self.call(["/sbin/chkconfig", s, "on"]) + for s in ksservices.disabled: + self.call(["/sbin/chkconfig", s, "off"]) + +class XConfig(KickstartConfig): + """A class to apply a kickstart X configuration to a system.""" + def apply(self, ksxconfig): + if ksxconfig.startX: + f = open(self.path("/etc/inittab"), "rw+") + buf = f.read() + buf = buf.replace("id:3:initdefault", "id:5:initdefault") + f.seek(0) + f.write(buf) + f.close() + if ksxconfig.defaultdesktop: + f = open(self.path("/etc/sysconfig/desktop"), "w") + f.write("DESKTOP="+ksxconfig.defaultdesktop+"\n") + f.close() + +class DesktopConfig(KickstartConfig): + """A class to apply a kickstart desktop configuration to a system.""" + def apply(self, ksdesktop): + if ksdesktop.defaultdesktop: + f = open(self.path("/etc/sysconfig/desktop"), "w") + f.write("DESKTOP="+ksdesktop.defaultdesktop+"\n") + f.close() + if os.path.exists(self.path("/etc/gdm/custom.conf")): + f = open(self.path("/etc/skel/.dmrc"), "w") + f.write("[Desktop]\n") + f.write("Session="+ksdesktop.defaultdesktop.lower()+"\n") + f.close() + if ksdesktop.session: + if os.path.exists(self.path("/etc/sysconfig/uxlaunch")): + f = open(self.path("/etc/sysconfig/uxlaunch"), "a+") + f.write("session="+ksdesktop.session.lower()+"\n") + f.close() + if ksdesktop.autologinuser: + f = open(self.path("/etc/sysconfig/desktop"), "a+") + f.write("AUTOLOGIN_USER=" + ksdesktop.autologinuser + "\n") + f.close() + if ksdesktop.session: + if os.path.exists(self.path("/etc/sysconfig/uxlaunch")): + f = open(self.path("/etc/sysconfig/uxlaunch"), "a+") + f.write("user="+ksdesktop.autologinuser+"\n") + f.close() + if os.path.exists(self.path("/etc/gdm/custom.conf")): + f = 
open(self.path("/etc/gdm/custom.conf"), "w") + f.write("[daemon]\n") + f.write("AutomaticLoginEnable=true\n") + f.write("AutomaticLogin=" + ksdesktop.autologinuser + "\n") + f.close() + +class MoblinRepoConfig(KickstartConfig): + """A class to apply a kickstart desktop configuration to a system.""" + def __create_repo_section(self, repo, type, fd): + baseurl = None + mirrorlist = None + reposuffix = {"base":"", "debuginfo":"-debuginfo", "source":"-source"} + reponame = repo.name + reposuffix[type] + if type == "base": + if repo.baseurl: + baseurl = repo.baseurl + if repo.mirrorlist: + mirrorlist = repo.mirrorlist + elif type == "debuginfo": + if repo.baseurl: + if repo.baseurl.endswith("/"): + baseurl = os.path.dirname(os.path.dirname(repo.baseurl)) + else: + baseurl = os.path.dirname(repo.baseurl) + baseurl += "/debug" + if repo.mirrorlist: + variant = repo.mirrorlist[repo.mirrorlist.find("$"):] + mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")] + mirrorlist += "debug" + "-" + variant + elif type == "source": + if repo.baseurl: + if repo.baseurl.endswith("/"): + baseurl = os.path.dirname(os.path.dirname(os.path.dirname(repo.baseurl))) + else: + baseurl = os.path.dirname(os.path.dirname(repo.baseurl)) + baseurl += "/source" + if repo.mirrorlist: + variant = repo.mirrorlist[repo.mirrorlist.find("$"):] + mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")] + mirrorlist += "source" + "-" + variant + + fd.write("[" + reponame + "]\n") + fd.write("name=" + reponame + "\n") + fd.write("failovermethod=priority\n") + if baseurl: + fd.write("baseurl=" + baseurl + "\n") + if mirrorlist: + fd.write("mirrorlist=" + mirrorlist + "\n") + """ Skip saving proxy settings """ + #if repo.proxy: + # fd.write("proxy=" + repo.proxy + "\n") + #if repo.proxy_username: + # fd.write("proxy_username=" + repo.proxy_username + "\n") + #if repo.proxy_password: + # fd.write("proxy_password=" + repo.proxy_password + "\n") + if repo.gpgkey: + fd.write("gpgkey=" + repo.gpgkey + "\n") 
+ fd.write("gpgcheck=1\n") + else: + fd.write("gpgcheck=0\n") + if type == "source" or type == "debuginfo" or repo.disable: + fd.write("enabled=0\n") + else: + fd.write("enabled=1\n") + fd.write("\n") + + def __create_repo_file(self, repo, repodir): + if not os.path.exists(self.path(repodir)): + fs.makedirs(self.path(repodir)) + f = open(self.path(repodir + "/" + repo.name + ".repo"), "w") + self.__create_repo_section(repo, "base", f) + if repo.debuginfo: + self.__create_repo_section(repo, "debuginfo", f) + if repo.source: + self.__create_repo_section(repo, "source", f) + f.close() + + def apply(self, ksrepo, repodata): + for repo in ksrepo.repoList: + if repo.save: + #self.__create_repo_file(repo, "/etc/yum.repos.d") + self.__create_repo_file(repo, "/etc/zypp/repos.d") + """ Import repo gpg keys """ + if repodata: + dev_null = os.open("/dev/null", os.O_WRONLY) + for repo in repodata: + if repo['repokey']: + subprocess.call([fs.find_binary_path("rpm"), "--root=%s" % self.instroot, "--import", repo['repokey']], + stdout = dev_null, stderr = dev_null) + os.close(dev_null) + +class RPMMacroConfig(KickstartConfig): + """A class to apply the specified rpm macros to the filesystem""" + def apply(self, ks): + if not ks: + return + if not os.path.exists(self.path("/etc/rpm")): + os.mkdir(self.path("/etc/rpm")) + f = open(self.path("/etc/rpm/macros.imgcreate"), "w+") + if exclude_docs(ks): + f.write("%_excludedocs 1\n") + f.write("%__file_context_path %{nil}\n") + if inst_langs(ks) != None: + f.write("%_install_langs ") + f.write(inst_langs(ks)) + f.write("\n") + f.close() + +class NetworkConfig(KickstartConfig): + """A class to apply a kickstart network configuration to a system.""" + def write_ifcfg(self, network): + p = self.path("/etc/sysconfig/network-scripts/ifcfg-" + network.device) + + f = file(p, "w+") + os.chmod(p, 0644) + + f.write("DEVICE=%s\n" % network.device) + f.write("BOOTPROTO=%s\n" % network.bootProto) + + if network.bootProto.lower() == "static": + if 
network.ip: + f.write("IPADDR=%s\n" % network.ip) + if network.netmask: + f.write("NETMASK=%s\n" % network.netmask) + + if network.onboot: + f.write("ONBOOT=on\n") + else: + f.write("ONBOOT=off\n") + + if network.essid: + f.write("ESSID=%s\n" % network.essid) + + if network.ethtool: + if network.ethtool.find("autoneg") == -1: + network.ethtool = "autoneg off " + network.ethtool + f.write("ETHTOOL_OPTS=%s\n" % network.ethtool) + + if network.bootProto.lower() == "dhcp": + if network.hostname: + f.write("DHCP_HOSTNAME=%s\n" % network.hostname) + if network.dhcpclass: + f.write("DHCP_CLASSID=%s\n" % network.dhcpclass) + + if network.mtu: + f.write("MTU=%s\n" % network.mtu) + + f.close() + + def write_wepkey(self, network): + if not network.wepkey: + return + + p = self.path("/etc/sysconfig/network-scripts/keys-" + network.device) + f = file(p, "w+") + os.chmod(p, 0600) + f.write("KEY=%s\n" % network.wepkey) + f.close() + + def write_sysconfig(self, useipv6, hostname, gateway): + path = self.path("/etc/sysconfig/network") + f = file(path, "w+") + os.chmod(path, 0644) + + f.write("NETWORKING=yes\n") + + if useipv6: + f.write("NETWORKING_IPV6=yes\n") + else: + f.write("NETWORKING_IPV6=no\n") + + if hostname: + f.write("HOSTNAME=%s\n" % hostname) + else: + f.write("HOSTNAME=localhost.localdomain\n") + + if gateway: + f.write("GATEWAY=%s\n" % gateway) + + f.close() + + def write_hosts(self, hostname): + localline = "" + if hostname and hostname != "localhost.localdomain": + localline += hostname + " " + l = hostname.split(".") + if len(l) > 1: + localline += l[0] + " " + localline += "localhost.localdomain localhost" + + path = self.path("/etc/hosts") + f = file(path, "w+") + os.chmod(path, 0644) + f.write("127.0.0.1\t\t%s\n" % localline) + f.write("::1\t\tlocalhost6.localdomain6 localhost6\n") + f.close() + + def write_resolv(self, nodns, nameservers): + if nodns or not nameservers: + return + + path = self.path("/etc/resolv.conf") + f = file(path, "w+") + os.chmod(path, 
def get_image_fsopts(ks, default = None):
    """Return the mount options of the root ("/") partition from a kickstart.

    Scans ks.handler.partition.partitions for the root partition and returns
    its fsopts value, or *default* when no root partition declares options.

    Bug fix: the original returned p.fstype (the filesystem type) instead of
    p.fsopts, mirroring a copy-paste from get_image_fstype above it.
    """
    for p in ks.handler.partition.partitions:
        if p.mountpoint == "/" and p.fsopts:
            return p.fsopts
    return default
def get_timeout(ks, default = None):
    """Return the bootloader timeout from the kickstart as an int.

    Falls back to *default* when the bootloader command has no timeout
    attribute or the timeout is unset (None).
    """
    timeout = getattr(ks.handler.bootloader, "timeout", None)
    return default if timeout is None else int(timeout)
def remove_all_repos(ks):
    """Remove every repo entry from the kickstart, emptying the list in place.

    Uses slice deletion instead of the original repeated `del lst[0]` loop:
    it is a single O(n) operation and, like the original, keeps the same list
    object alive so any caller holding a reference sees the (now empty) list.
    """
    del ks.handler.repo.repoList[:]
repometadata, use_comps = False): + pkgmgr = creator.pkgmgr.get_default_pkg_manager + iszypp = False + if creator.pkgmgr.managers.has_key("zypp") and creator.pkgmgr.managers['zypp'] == pkgmgr: + iszypp = True + ks = creator.ks + + for repo in repometadata: + """ Mustn't replace group with package list if repo is ready for the corresponding package manager """ + if iszypp and repo["patterns"] and not use_comps: + continue + if not iszypp and repo["comps"] and use_comps: + continue + + """ + But we also must handle such cases, use zypp but repo only has comps, + use yum but repo only has patterns, use zypp but use_comps is true, + use yum but use_comps is false. + """ + groupfile = None + if iszypp: + if (use_comps and repo["comps"]) or (not repo["patterns"] and repo["comps"]): + groupfile = repo["comps"] + get_pkglist_handler = misc.get_pkglist_in_comps + if not iszypp: + if (not use_comps and repo["patterns"]) or (not repo["comps"] and repo["patterns"]): + groupfile = repo["patterns"] + get_pkglist_handler = misc.get_pkglist_in_patterns + + if groupfile: + i = 0 + while True: + if i >= len(ks.handler.packages.groupList): + break + pkglist = get_pkglist_handler(ks.handler.packages.groupList[i].name, groupfile) + if pkglist: + del ks.handler.packages.groupList[i] + for pkg in pkglist: + if pkg not in ks.handler.packages.packageList: + ks.handler.packages.packageList.append(pkg) + else: + i = i + 1 diff --git a/micng/utils/kscommands/__init__.py b/micng/utils/kscommands/__init__.py new file mode 100644 index 0000000..7123ac1 --- /dev/null +++ b/micng/utils/kscommands/__init__.py @@ -0,0 +1,8 @@ +import desktop +import moblinrepo + +__all__ = ( + "Moblin_Desktop", + "Moblin_Repo", + "Moblin_RepoData", +) diff --git a/micng/utils/kscommands/desktop.py b/micng/utils/kscommands/desktop.py new file mode 100644 index 0000000..dfa5250 --- /dev/null +++ b/micng/utils/kscommands/desktop.py @@ -0,0 +1,78 @@ +#!/usr/bin/python -tt +# +# Yi Yang <yi.y.yang@intel.com> +# +# 
Copyright 2008, 2009, 2010 Intel, Inc. +# +# This copyrighted material is made available to anyone wishing to use, modify, +# copy, or redistribute it subject to the terms and conditions of the GNU +# General Public License v.2. This program is distributed in the hope that it +# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the +# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat +# trademarks that are incorporated in the source code or documentation are not +# subject to the GNU General Public License and may only be used or replicated +# with the express permission of Red Hat, Inc. +# +from pykickstart.base import * +from pykickstart.errors import * +from pykickstart.options import * + +class Moblin_Desktop(KickstartCommand): + def __init__(self, writePriority=0, defaultdesktop=None, defaultdm=None, autologinuser="meego", session="/usr/bin/mutter --sm-disable"): + KickstartCommand.__init__(self, writePriority) + + self.__new_version = False + self.op = self._getParser() + + self.defaultdesktop = defaultdesktop + self.autologinuser = autologinuser + self.defaultdm = defaultdm + self.session = session + + def __str__(self): + retval = "" + + if self.defaultdesktop != None: + retval += " --defaultdesktop=%s" % self.defaultdesktop + if self.session != None: + retval += " --session=\"%s\"" % self.session + if self.autologinuser != None: + retval += " --autologinuser=%s" % self.autologinuser + if self.defaultdm != None: + retval += " --defaultdm=%s" % self.defaultdm + + if retval != "": + retval = "# Default Desktop Settings\ndesktop %s\n" % retval + + return retval + + def _getParser(self): + try: + op = 
KSOptionParser(lineno=self.lineno) + except TypeError: + # the latest version has not lineno argument + op = KSOptionParser() + self.__new_version = True + + op.add_option("--defaultdesktop", dest="defaultdesktop", action="store", type="string", nargs=1) + op.add_option("--autologinuser", dest="autologinuser", action="store", type="string", nargs=1) + op.add_option("--defaultdm", dest="defaultdm", action="store", type="string", nargs=1) + op.add_option("--session", dest="session", action="store", type="string", nargs=1) + return op + + def parse(self, args): + if self.__new_version: + (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) + else: + (opts, extra) = self.op.parse_args(args=args) + + if extra: + mapping = {"command": "desktop", "options": extra} + raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping) + + self._setToSelf(self.op, opts) diff --git a/micng/utils/kscommands/micboot.py b/micng/utils/kscommands/micboot.py new file mode 100644 index 0000000..9e26dae --- /dev/null +++ b/micng/utils/kscommands/micboot.py @@ -0,0 +1,45 @@ +#!/usr/bin/python -tt +# +# Anas Nashif +# +# Copyright 2008, 2009, 2010 Intel, Inc. +# +# This copyrighted material is made available to anyone wishing to use, modify, +# copy, or redistribute it subject to the terms and conditions of the GNU +# General Public License v.2. This program is distributed in the hope that it +# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the +# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
class Moblin_Bootloader(F8_Bootloader):
    """F8 bootloader kickstart command extended with a --menus option."""

    def __init__(self, writePriority=10, appendLine="", driveorder=None,
                 forceLBA=False, location="", md5pass="", password="",
                 upgrade=False, menus=""):
        F8_Bootloader.__init__(self, writePriority, appendLine, driveorder,
                               forceLBA, location, md5pass, password, upgrade)

        # Bug fix: was `self.menus = ""`, which silently discarded the
        # `menus` constructor argument.
        self.menus = menus

    def _getArgsAsStr(self):
        ret = F8_Bootloader._getArgsAsStr(self)

        # Bug fix: the condition was inverted (`== ""`), so --menus was only
        # emitted when it was empty; emit it only when a value is set.
        if self.menus != "":
            ret += " --menus=%s" % (self.menus,)
        return ret

    def _getParser(self):
        op = F8_Bootloader._getParser(self)
        op.add_option("--menus", dest="menus")
        return op
class Moblin_RepoData(F8_RepoData):
    """F8 repo data extended with MeeGo-specific options.

    Adds save/proxy/proxy credentials/debuginfo/source/gpgkey/disable on top
    of the base F8 repo fields, and serializes them in _getArgsAsStr.
    """

    def __init__(self, baseurl="", mirrorlist="", name="", priority=None,
                 includepkgs=None, excludepkgs=None, save=False, proxy=None,
                 proxy_username=None, proxy_password=None, debuginfo=False,
                 source=False, gpgkey=None, disable=False):
        # Fix mutable default arguments: the original used `includepkgs=[]`
        # and `excludepkgs=[]`, which are shared across all calls; use None
        # sentinels and build fresh lists here instead.  `priority` is kept
        # for interface compatibility (unused, as in the original).
        if includepkgs is None:
            includepkgs = []
        if excludepkgs is None:
            excludepkgs = []
        F8_RepoData.__init__(self, baseurl=baseurl, mirrorlist=mirrorlist,
                             name=name, includepkgs=includepkgs,
                             excludepkgs=excludepkgs)
        self.save = save
        self.proxy = proxy
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        self.debuginfo = debuginfo
        self.disable = disable
        self.source = source
        self.gpgkey = gpgkey

    def _getArgsAsStr(self):
        """Append the MeeGo-specific options to the base argument string."""
        retval = F8_RepoData._getArgsAsStr(self)

        if self.save:
            retval += " --save"
        if self.proxy:
            retval += " --proxy=%s" % self.proxy
        if self.proxy_username:
            retval += " --proxyuser=%s" % self.proxy_username
        if self.proxy_password:
            retval += " --proxypasswd=%s" % self.proxy_password
        if self.debuginfo:
            retval += " --debuginfo"
        if self.source:
            retval += " --source"
        if self.gpgkey:
            retval += " --gpgkey=%s" % self.gpgkey
        if self.disable:
            retval += " --disable"

        return retval
action="store_true", dest="save", + default=False) + op.add_option("--proxy", type="string", action="store", dest="proxy", + default=None, nargs=1) + op.add_option("--proxyuser", type="string", action="store", dest="proxy_username", + default=None, nargs=1) + op.add_option("--proxypasswd", type="string", action="store", dest="proxy_password", + default=None, nargs=1) + op.add_option("--debuginfo", action="store_true", dest="debuginfo", + default=False) + op.add_option("--source", action="store_true", dest="source", + default=False) + op.add_option("--disable", action="store_true", dest="disable", + default=False) + op.add_option("--gpgkey", type="string", action="store", dest="gpgkey", + default=None, nargs=1) + return op diff --git a/micng/utils/logger.py b/micng/utils/logger.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/micng/utils/logger.py diff --git a/micng/utils/misc.py b/micng/utils/misc.py new file mode 100644 index 0000000..d36e5e6 --- /dev/null +++ b/micng/utils/misc.py @@ -0,0 +1,1146 @@ +# +# misc.py : miscellaneous utilities +# +# Copyright 2010, Intel Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
def get_extension_name(path):
    """Return the final dot-separated extension of *path*, or None.

    The extension must consist of word characters and sit at the very end
    of the string, immediately after a dot (e.g. "a.tar.gz" -> "gz").
    """
    match = re.search(r"(?<=\.)\w+$", path)
    return match.group(0) if match else None
def convert_image(srcimg, srcfmt, dstimg, dstfmt):
    """Convert a disk image from *srcfmt* ("vmdk" or "vdi") to raw format.

    Shells out to qemu-img (vmdk) or VBoxManage (vdi).  Raises CreatorError
    on an unsupported format or a non-zero converter exit status.
    """
    # Only a raw destination is supported.
    if dstfmt != "raw":
        raise CreatorError("Invalid destination image format: %s" % dstfmt)
    logging.debug("converting %s image to %s" % (srcimg, dstimg))
    if srcfmt == "vmdk":
        path = find_binary_path("qemu-img")
        argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
    elif srcfmt == "vdi":
        path = find_binary_path("VBoxManage")
        argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
    else:
        # Bug fix: error message typo "soure" -> "source".
        raise CreatorError("Invalid source image format: %s" % srcfmt)

    rc = subprocess.call(argv)
    # Merge the original's two separate rc checks into one if/else.
    if rc != 0:
        raise CreatorError("Unable to convert disk to %s" % dstfmt)
    logging.debug("convert successful")
def ismeego(rootdir):
    """Heuristically decide whether *rootdir* contains a MeeGo/Moblin rootfs.

    Requires a moblin or meego release file, inittab, rc.sysinit, and at
    least one /boot/vmlinuz-* kernel image under *rootdir*.
    """
    has_release = (os.path.exists(rootdir + "/etc/moblin-release")
                   or os.path.exists(rootdir + "/etc/meego-release"))
    return bool(has_release
                and os.path.exists(rootdir + "/etc/inittab")
                and os.path.exists(rootdir + "/etc/rc.sysinit")
                and glob.glob(rootdir + "/boot/vmlinuz-*"))
def ip_to_int(ip):
    """Pack a dotted-quad IPv4 string into its 32-bit integer value."""
    value = 0
    for idx, octet in enumerate(ip.split(".")):
        # First octet is the most significant byte.
        value |= int(octet) << (24 - 8 * idx)
    return value
def isnoproxy(url):
    """Return True if *url*'s host matches an entry of the no-proxy list.

    Rules come from the module-level _my_noproxy_list built by
    set_noproxy_list(): match 0 = exact host, 1 = domain-suffix,
    2 = IP/netmask (applies to IP hosts only).
    """
    # Strip credentials and port from the netloc before matching.
    host = urlparse.urlparse(url)[1]
    if '@' in host:
        host = host.split('@', 1)[1]
    if ':' in host:
        host = host.split(':', 1)[0]

    host_is_ip = isip(host)
    for rule in _my_noproxy_list:
        kind = rule["match"]
        if host_is_ip:
            # Only IP/netmask rules can match a bare IP host.
            if kind == 2 and (ip_to_int(host) & rule["netmask"]) == rule["needle"]:
                return True
        elif kind == 0 and host == rule["needle"]:
            return True
        elif kind == 1 and host.rfind(rule["needle"]) > 0:
            # NOTE(review): rfind(...) > 0 is a loose tail match (a host
            # exactly equal to the needle minus its leading dot never
            # matches) — preserved as-is from the original.
            return True
    return False
def get_temp_reponame(baseurl):
    """Derive a deterministic temporary repo name: md5 hex digest of the URL."""
    return hashlib.md5(baseurl).hexdigest()
def read_siteconf(siteconf = None):
    """Load the mic2 site configuration.

    When *siteconf* is given, read only that file; otherwise layer the
    global config (/etc/mic2/mic2.conf) with the per-user ~/.mic2.conf,
    user values overriding global ones.  Returns the parser, or None when
    no section was read.
    """
    from ConfigParser import SafeConfigParser

    parser = SafeConfigParser()
    if siteconf:
        parser.read(siteconf)
    else:
        # Global first, then user, so user settings win.
        for candidate in (DEFAULT_SITECONF_GLOBAL,
                          os.path.expanduser(DEFAULT_SITECONF_USER)):
            if os.path.isfile(candidate):
                parser.read(candidate)

    return parser if parser.sections() else None
";".join(repodata.includepkgs) + if hasattr(repodata, "excludepkgs") and repodata.excludepkgs: + repostr += ",excludepkgs:" + ";".join(repodata.excludepkgs) + if hasattr(repodata, "cost") and repodata.cost: + repostr += ",cost:%d" % repodata.cost + if hasattr(repodata, "save") and repodata.save: + repostr += ",save:" + if hasattr(repodata, "proxy") and repodata.proxy: + repostr += ",proxy:" + repodata.proxy + if hasattr(repodata, "proxyuser") and repodata.proxy_username: + repostr += ",proxyuser:" + repodata.proxy_username + if hasattr(repodata, "proxypasswd") and repodata.proxy_password: + repostr += ",proxypasswd:" + repodata.proxy_password + if repostr.find("name:") == -1: + repostr = ",name:%s" % get_temp_reponame(repodata.baseurl) + if hasattr(repodata, "debuginfo") and repodata.debuginfo: + repostr += ",debuginfo:" + if hasattr(repodata, "source") and repodata.source: + repostr += ",source:" + if hasattr(repodata, "gpgkey") and repodata.gpgkey: + repostr += ",gpgkey:" + repodata.gpgkey + kickstart_repos.append(repostr[1:]) + return kickstart_repos + +def get_repostrs_from_siteconf(siteconf): + site_repos = [] + if not siteconf: + return site_repos + + for section in siteconf._sections: + if section != "main": + repostr = "" + if siteconf.has_option(section, "enabled") \ + and siteconf.get(section, "enabled") == "1" \ + and (not siteconf.has_option(section, "equalto") or not siteconf.get(section, "equalto")): + if siteconf.has_option(section, "name") and siteconf.get(section, "name"): + repostr += ",name:%s" % siteconf.get(section, "name") + if siteconf.has_option(section, "baseurl") and siteconf.get(section, "baseurl"): + repostr += ",baseurl:%s" % siteconf.get(section, "baseurl") + if siteconf.has_option(section, "mirrorlist") and siteconf.get(section, "mirrorlist"): + repostr += ",mirrorlist:%s" % siteconf.get(section, "mirrorlist") + if siteconf.has_option(section, "includepkgs") and siteconf.get(section, "includepkgs"): + repostr += ",includepkgs:%s" % 
siteconf.get(section, "includepkgs").replace(",", ";") + if siteconf.has_option(section, "excludepkgs") and siteconf.get(section, "excludepkgs"): + repostr += ",excludepkgs:%s" % siteconf.get(section, "excludepkgs").replace(",", ";") + if siteconf.has_option(section, "cost") and siteconf.get(section, "cost"): + repostr += ",cost:%s" % siteconf.get(section, "cost") + if siteconf.has_option(section, "save") and siteconf.get(section, "save"): + repostr += ",save:" + if siteconf.has_option(section, "proxy") and siteconf.get(section, "proxy"): + repostr += ",proxy:%s" % siteconf.get(section, "proxy") + if siteconf.has_option(section, "proxy_username") and siteconf.get(section, "proxy_username"): + repostr += ",proxyuser:%s" % siteconf.get(section, "proxy_username") + if siteconf.has_option(section, "proxy_password") and siteconf.get(section, "proxy_password"): + repostr += ",proxypasswd:%s" % siteconf.get(section, "proxy_password") + if repostr != "": + if repostr.find("name:") == -1: + repostr = ",name:%s" % get_temp_reponame() + site_repos.append(repostr[1:]) + return site_repos + +def get_uncompressed_data_from_url(url, filename, proxies): + filename = myurlgrab(url, filename, proxies) + suffix = None + if filename.endswith(".gz"): + suffix = ".gz" + gunzip = find_binary_path('gunzip') + subprocess.call([gunzip, "-f", filename]) + elif filename.endswith(".bz2"): + suffix = ".bz2" + bunzip2 = find_binary_path('bunzip2') + subprocess.call([bunzip2, "-f", filename]) + if suffix: + filename = filename.replace(suffix, "") + return filename + +def get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename): + url = str(baseurl + "/" + filename) + filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename))) + return get_uncompressed_data_from_url(url,filename_tmp,proxies) + +def get_metadata_from_repos(repostrs, cachedir): + if not cachedir: + CreatorError("No cache dir defined.") + + my_repo_metadata = [] + for repostr in repostrs: + 
reponame = None + baseurl = None + proxy = None + items = repostr.split(",") + for item in items: + subitems = item.split(":") + if subitems[0] == "name": + reponame = subitems[1] + if subitems[0] == "baseurl": + baseurl = item[8:] + if subitems[0] == "proxy": + proxy = item[6:] + if subitems[0] in ("http", "https", "ftp", "ftps", "file"): + baseurl = item + if not proxy: + proxy = get_proxy(baseurl) + proxies = None + if proxy: + proxies = {str(proxy.split(":")[0]):str(proxy)} + makedirs(cachedir + "/" + reponame) + url = str(baseurl + "/repodata/repomd.xml") + filename = str("%s/%s/repomd.xml" % (cachedir, reponame)) + repomd = myurlgrab(url, filename, proxies) + try: + root = xmlparse(repomd) + except SyntaxError: + raise CreatorError("repomd.xml syntax error.") + + ns = root.getroot().tag + ns = ns[0:ns.rindex("}")+1] + + patterns = None + for elm in root.getiterator("%sdata" % ns): + if elm.attrib["type"] == "patterns": + patterns = elm.find("%slocation" % ns).attrib['href'] + break + + comps = None + for elm in root.getiterator("%sdata" % ns): + if elm.attrib["type"] == "group_gz": + comps = elm.find("%slocation" % ns).attrib['href'] + break + if not comps: + for elm in root.getiterator("%sdata" % ns): + if elm.attrib["type"] == "group": + comps = elm.find("%slocation" % ns).attrib['href'] + break + + primary_type = None + for elm in root.getiterator("%sdata" % ns): + if elm.attrib["type"] == "primary_db": + primary_type=".sqlite" + break + + if not primary_type: + for elm in root.getiterator("%sdata" % ns): + if elm.attrib["type"] == "primary": + primary_type=".xml" + break + + if not primary_type: + continue + + primary = elm.find("%slocation" % ns).attrib['href'] + primary = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, primary) + + if patterns: + patterns = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, patterns) + + if comps: + comps = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, comps) + + """ Get repo key 
""" + try: + repokey = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, "repodata/repomd.xml.key") + except CreatorError: + repokey = None + print "Warning: can't get %s/%s" % (baseurl, "repodata/repomd.xml.key") + + my_repo_metadata.append({"name":reponame, "baseurl":baseurl, "repomd":repomd, "primary":primary, "cachedir":cachedir, "proxies":proxies, "patterns":patterns, "comps":comps, "repokey":repokey}) + return my_repo_metadata + +def get_arch(repometadata): + archlist = [] + for repo in repometadata: + if repo["primary"].endswith(".xml"): + root = xmlparse(repo["primary"]) + ns = root.getroot().tag + ns = ns[0:ns.rindex("}")+1] + for elm in root.getiterator("%spackage" % ns): + if elm.find("%sarch" % ns).text not in ("noarch", "src"): + arch = elm.find("%sarch" % ns).text + if arch not in archlist: + archlist.append(arch) + elif repo["primary"].endswith(".sqlite"): + con = sqlite.connect(repo["primary"]) + for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"): + if row[0] not in archlist: + archlist.append(row[0]) + + con.close() + return archlist + + +def get_package(pkg, repometadata, arch = None): + ver = "" + target_repo = None + for repo in repometadata: + if repo["primary"].endswith(".xml"): + root = xmlparse(repo["primary"]) + ns = root.getroot().tag + ns = ns[0:ns.rindex("}")+1] + for elm in root.getiterator("%spackage" % ns): + if elm.find("%sname" % ns).text == pkg: + if elm.find("%sarch" % ns).text != "src": + version = elm.find("%sversion" % ns) + tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel']) + if tmpver > ver: + ver = tmpver + location = elm.find("%slocation" % ns) + pkgpath = "%s" % location.attrib['href'] + target_repo = repo + break + if repo["primary"].endswith(".sqlite"): + con = sqlite.connect(repo["primary"]) + if not arch: + for row in con.execute("select version, release,location_href from packages where name = \"%s\" and arch != \"src\"" % pkg): + tmpver = "%s-%s" % 
(row[0], row[1]) + if tmpver > ver: + pkgpath = "%s" % row[2] + target_repo = repo + break + else: + for row in con.execute("select version, release,location_href from packages where name = \"%s\"" % pkg): + tmpver = "%s-%s" % (row[0], row[1]) + if tmpver > ver: + pkgpath = "%s" % row[2] + target_repo = repo + break + con.close() + if target_repo: + makedirs("%s/%s/packages" % (target_repo["cachedir"], target_repo["name"])) + url = str(target_repo["baseurl"] + "/" + pkgpath) + filename = str("%s/%s/packages/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath))) + pkg = myurlgrab(url, filename, target_repo["proxies"]) + return pkg + else: + return None + +def get_source_name(pkg, repometadata): + + def get_bin_name(pkg): + m = re.match("(.*)-(.*)-(.*)\.(.*)\.rpm", pkg) + if m: + return m.group(1) + return None + + def get_src_name(srpm): + m = re.match("(.*)-(\d+.*)-(\d+\.\d+).src.rpm", srpm) + if m: + return m.group(1) + return None + + ver = "" + target_repo = None + + pkg_name = get_bin_name(pkg) + if not pkg_name: + return None + + for repo in repometadata: + if repo["primary"].endswith(".xml"): + root = xmlparse(repo["primary"]) + ns = root.getroot().tag + ns = ns[0:ns.rindex("}")+1] + for elm in root.getiterator("%spackage" % ns): + if elm.find("%sname" % ns).text == pkg_name: + if elm.find("%sarch" % ns).text != "src": + version = elm.find("%sversion" % ns) + tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel']) + if tmpver > ver: + ver = tmpver + fmt = elm.find("%sformat" % ns) + if fmt: + fns = fmt.getchildren()[0].tag + fns = fns[0:fns.rindex("}")+1] + pkgpath = fmt.find("%ssourcerpm" % fns).text + target_repo = repo + break + + if repo["primary"].endswith(".sqlite"): + con = sqlite.connect(repo["primary"]) + for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name): + tmpver = "%s-%s" % (row[0], row[1]) + if tmpver > ver: + pkgpath = "%s" % row[2] 
+ target_repo = repo + break + con.close() + if target_repo: + return get_src_name(pkgpath) + else: + return None + +def get_release_no(repometadata, distro="meego"): + cpio = find_binary_path("cpio") + rpm2cpio = find_binary_path("rpm2cpio") + release_pkg = get_package("%s-release" % distro, repometadata) + if release_pkg: + tmpdir = mkdtemp() + oldcwd = os.getcwd() + os.chdir(tmpdir) + p1 = subprocess.Popen([rpm2cpio, release_pkg], stdout = subprocess.PIPE) + p2 = subprocess.Popen([cpio, "-idv"], stdin = p1.stdout, stdout = subprocess.PIPE, stderr = subprocess.PIPE) + p2.communicate() + f = open("%s/etc/%s-release" % (tmpdir, distro), "r") + content = f.read() + f.close() + os.chdir(oldcwd) + shutil.rmtree(tmpdir, ignore_errors = True) + return content.split(" ")[2] + else: + return "UNKNOWN" + +def get_kickstarts_from_repos(repometadata): + kickstarts = [] + for repo in repometadata: + try: + root = xmlparse(repo["repomd"]) + except SyntaxError: + raise CreatorError("repomd.xml syntax error.") + + ns = root.getroot().tag + ns = ns[0:ns.rindex("}")+1] + + for elm in root.getiterator("%sdata" % ns): + if elm.attrib["type"] == "image-config": + break + + if elm.attrib["type"] != "image-config": + continue + + location = elm.find("%slocation" % ns) + image_config = str(repo["baseurl"] + "/" + location.attrib["href"]) + filename = str("%s/%s/image-config.xml%s" % (repo["cachedir"], repo["name"], suffix)) + + image_config = get_uncompressed_data_from_url(image_config,filename,repo["proxies"]) + + try: + root = xmlparse(image_config) + except SyntaxError: + raise CreatorError("image-config.xml syntax error.") + + for elm in root.getiterator("config"): + path = elm.find("path").text + path = path.replace("images-config", "image-config") + description = elm.find("description").text + makedirs(os.path.dirname("%s/%s/%s" % (repo["cachedir"], repo["name"], path))) + url = path + if "http" not in path: + url = str(repo["baseurl"] + "/" + path) + filename = str("%s/%s/%s" % 
(repo["cachedir"], repo["name"], path)) + path = myurlgrab(url, filename, repo["proxies"]) + kickstarts.append({"filename":path,"description":description}) + return kickstarts + +def select_ks(ksfiles): + print "Available kickstart files:" + i = 0 + for ks in ksfiles: + i += 1 + print "\t%d. %s (%s)" % (i, ks["description"], os.path.basename(ks["filename"])) + while True: + choice = raw_input("Please input your choice and press ENTER. [1..%d] ? " % i) + if choice.lower() == "q": + sys.exit(1) + if choice.isdigit(): + choice = int(choice) + if choice >= 1 and choice <= i: + break + + return ksfiles[choice-1]["filename"] + + +def get_pkglist_in_patterns(group, patterns): + found = False + pkglist = [] + try: + root = xmlparse(patterns) + except SyntaxError: + raise SyntaxError("%s syntax error." % patterns) + + for elm in list(root.getroot()): + ns = elm.tag + ns = ns[0:ns.rindex("}")+1] + name = elm.find("%sname" % ns) + summary = elm.find("%ssummary" % ns) + if name.text == group or summary.text == group: + found = True + break + + if not found: + return pkglist + + found = False + for requires in list(elm): + if requires.tag.endswith("requires"): + found = True + break + + if not found: + return pkglist + + for pkg in list(requires): + pkgname = pkg.attrib["name"] + if pkgname not in pkglist: + pkglist.append(pkgname) + + return pkglist + +def get_pkglist_in_comps(group, comps): + found = False + pkglist = [] + try: + root = xmlparse(comps) + except SyntaxError: + raise SyntaxError("%s syntax error." 
% comps) + + for elm in root.getiterator("group"): + id = elm.find("id") + name = elm.find("name") + if id.text == group or name.text == group: + packagelist = elm.find("packagelist") + found = True + break + + if not found: + return pkglist + + for require in elm.getiterator("packagereq"): + if require.tag.endswith("packagereq"): + pkgname = require.text + if pkgname not in pkglist: + pkglist.append(pkgname) + + return pkglist + +def is_statically_linked(binary): + ret = False + dev_null = os.open("/dev/null", os.O_WRONLY) + filecmd = find_binary_path("file") + args = [ filecmd, binary ] + file = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=dev_null) + output = file.communicate()[0] + os.close(dev_null) + if output.find(", statically linked, ") > 0: + ret = True + return ret + +def setup_qemu_emulator(rootdir, arch): + # mount binfmt_misc if it doesn't exist + if not os.path.exists("/proc/sys/fs/binfmt_misc"): + modprobecmd = find_binary_path("modprobe") + subprocess.call([modprobecmd, "binfmt_misc"]) + if not os.path.exists("/proc/sys/fs/binfmt_misc/register"): + mountcmd = find_binary_path("mount") + subprocess.call([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"]) + + # qemu_emulator is a special case, we can't use find_binary_path + # qemu emulator should be a statically-linked executable file + qemu_emulator = "/usr/bin/qemu-arm" + if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator): + qemu_emulator = "/usr/bin/qemu-arm-static" + if not os.path.exists(qemu_emulator): + raise CreatorError("Please install a statically-linked qemu-arm") + if not os.path.exists(rootdir + "/usr/bin"): + makedirs(rootdir + "/usr/bin") + shutil.copy(qemu_emulator, rootdir + qemu_emulator) + + # disable selinux, selinux will block qemu emulator to run + if os.path.exists("/usr/sbin/setenforce"): + subprocess.call(["/usr/sbin/setenforce", "0"]) + + node = "/proc/sys/fs/binfmt_misc/arm" + if is_statically_linked(qemu_emulator) and 
os.path.exists(node): + return qemu_emulator + + # unregister it if it has been registered and is a dynamically-linked executable + if not is_statically_linked(qemu_emulator) and os.path.exists(node): + qemu_unregister_string = "-1\n" + fd = open("/proc/sys/fs/binfmt_misc/arm", "w") + fd.write(qemu_unregister_string) + fd.close() + + # register qemu emulator for interpreting other arch executable file + if not os.path.exists(node): + qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator + fd = open("/proc/sys/fs/binfmt_misc/register", "w") + fd.write(qemu_arm_string) + fd.close() + + return qemu_emulator + +def create_release(config, destdir, name, outimages, release): + """ TODO: This functionality should really be in creator.py inside the + ImageCreator class. """ + + # For virtual machine images, we have a subdir for it, this is unnecessary + # for release + thatsubdir = None + for i in range(len(outimages)): + file = outimages[i] + if not os.path.isdir(file) and os.path.dirname(file) != destdir: + thatsubdir = os.path.dirname(file) + newfile = os.path.join(destdir, os.path.basename(file)) + shutil.move(file, newfile) + outimages[i] = newfile + if thatsubdir: + shutil.rmtree(thatsubdir, ignore_errors = True) + + """ Create release directory and files """ + os.system ("cp %s %s/%s.ks" % (config, destdir, name)) + # When building a release we want to make sure the .ks + # file generates the same build even when --release= is not used. + fd = open(config, "r") + kscont = fd.read() + fd.close() + kscont = kscont.replace("@BUILD_ID@",release) + fd = open("%s/%s.ks" % (destdir,name), "w") + fd.write(kscont) + fd.close() + outimages.append("%s/%s.ks" % (destdir,name)) + + # Using system + mv, because of * in filename. 
+ os.system ("mv %s/*-pkgs.txt %s/%s.packages" % (destdir, destdir, name)) + outimages.append("%s/%s.packages" % (destdir,name)) + + d = os.listdir(destdir) + for f in d: + if f.endswith(".iso"): + ff = f.replace(".iso", ".img") + os.rename("%s/%s" %(destdir, f ), "%s/%s" %(destdir, ff)) + outimages.append("%s/%s" %(destdir, ff)) + elif f.endswith(".usbimg"): + ff = f.replace(".usbimg", ".img") + os.rename("%s/%s" %(destdir, f ), "%s/%s" %(destdir, ff)) + outimages.append("%s/%s" %(destdir, ff)) + + fd = open(destdir + "/MANIFEST", "w") + d = os.listdir(destdir) + for f in d: + if f == "MANIFEST": + continue + if os.path.exists("/usr/bin/md5sum"): + p = subprocess.Popen(["/usr/bin/md5sum", "-b", "%s/%s" %(destdir, f )], + stdout=subprocess.PIPE) + (md5sum, errorstr) = p.communicate() + if p.returncode != 0: + logging.warning("Can't generate md5sum for image %s/%s" %(destdir, f )) + else: + md5sum = md5sum.split(" ")[0] + fd.write(md5sum+" "+f+"\n") + + outimages.append("%s/MANIFEST" % destdir) + fd.close() + + """ Update the file list. 
""" + updated_list = [] + for file in outimages: + if os.path.exists("%s" % file): + updated_list.append(file) + + return updated_list + +def get_local_distro(): + print "Local linux distribution:" + for file in glob.glob("/etc/*-release"): + fd = open(file, "r") + content = fd.read() + fd.close() + print content + if os.path.exists("/etc/issue"): + fd = open("/etc/issue", "r") + content = fd.read() + fd.close() + print content + print "Local Kernel version: " + os.uname()[2] + +def check_mic_installation(argv): + creator_name = os.path.basename(argv[0]) + if os.path.exists("/usr/local/bin/" + creator_name) \ + and os.path.exists("/usr/bin/" + creator_name): + raise CreatorError("There are two mic2 installations existing, this will result in some unpredictable errors, the reason is installation path of mic2 binary is different from installation path of mic2 source on debian-based distros, please remove one of them to ensure it can work normally.") + +def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir): + + def get_source_repometadata(repometadata): + src_repometadata=[] + for repo in repometadata: + if repo["name"].endswith("-source"): + src_repometadata.append(repo) + if src_repometadata: + return src_repometadata + return None + + def get_src_name(srpm): + m = re.match("(.*)-(\d+.*)-(\d+\.\d+).src.rpm", srpm) + if m: + return m.group(1) + return None + + src_repometadata = get_source_repometadata(repometadata) + + if not src_repometadata: + print "No source repo found" + return None + + src_pkgs = [] + lpkgs_dict = {} + lpkgs_path = [] + for repo in src_repometadata: + cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"]) + lpkgs_path += glob.glob(cachepath) + + for lpkg in lpkgs_path: + lpkg_name = get_src_name(os.path.basename(lpkg)) + lpkgs_dict[lpkg_name] = lpkg + localpkgs = lpkgs_dict.keys() + + cached_count = 0 + destdir = instroot+'/usr/src/SRPMS' + if not os.path.exists(destdir): + os.makedirs(destdir) + + srcpkgset = set() + for _pkg 
in pkgs: + srcpkg_name = get_source_name(_pkg, repometadata) + if not srcpkg_name: + return None + srcpkgset.add(srcpkg_name) + + for pkg in list(srcpkgset): + if pkg in localpkgs: + cached_count += 1 + shutil.copy(lpkgs_dict[pkg], destdir) + src_pkgs.append(os.path.basename(lpkgs_dict[pkg])) + else: + src_pkg = get_package(pkg, src_repometadata, 'src') + if src_pkg: + shutil.copy(src_pkg, destdir) + src_pkgs.append(src_pkg) + print '--------------------------------------------------' + print "%d source packages gotten from cache" %cached_count + + return src_pkgs + +def add_optparser(arg): + def decorate(f): + if not hasattr(f, "optparser"): + f.optparser = arg + return f + return decorate diff --git a/micng/utils/pkgmanagers/__init__.py b/micng/utils/pkgmanagers/__init__.py new file mode 100644 index 0000000..c18877e --- /dev/null +++ b/micng/utils/pkgmanagers/__init__.py @@ -0,0 +1,55 @@ +#!/usr/bin/python + +import os +from micng.utils.errors import * + +class pkgManager: + def __init__(self): + self.managers = {} + self.default_pkg_manager = None + + def register_pkg_manager(self, name, manager): +# print "Registering package manager: %s" % name + if not self.managers.has_key(name): + self.managers[name] = manager + + def unregister_pkg_manager(self, name): + if self.managers.has_key(name): + del self.managers[name] + + def set_default_pkg_manager(self, name): + if self.managers.has_key(name): + self.default_pkg_manager = self.managers[name] + print "Use package manager %s" % name + + def get_default_pkg_manager(self): + if self.default_pkg_manager: + return self.default_pkg_manager + else: + if self.managers.has_key("zypp"): + print "Use package manager zypp" + return self.managers["zypp"] + elif self.managers.has_key("yum"): + print "Use package manager yum" + return self.managers["yum"] + else: + keys = self.managers.keys() + if keys: + print "Use package manager %s" % keys[0] + return self.managers[keys[0]] + else: + return None + + def 
load_pkg_managers(self): + mydir = os.path.dirname(os.path.realpath(__file__)) + for file in os.listdir(mydir): + if os.path.isfile(mydir + "/" + file) and file.endswith(".py") and file != "__init__.py": + pkgmgrmod = file[:file.rfind(".py")] + try: + exec("import micng.utils.pkgmanagers.%s as %s " % (pkgmgrmod, pkgmgrmod)) + exec("pkgmgr = %s._pkgmgr" % pkgmgrmod) + self.register_pkg_manager(pkgmgr[0], pkgmgr[1]) + except: + continue + if not self.managers.keys(): + raise CreatorError("No packag manager available") diff --git a/micng/utils/pkgmanagers/yumpkgmgr.py b/micng/utils/pkgmanagers/yumpkgmgr.py new file mode 100644 index 0000000..0f7e2ee --- /dev/null +++ b/micng/utils/pkgmanagers/yumpkgmgr.py @@ -0,0 +1,448 @@ +# +# yum.py : yum utilities +# +# Copyright 2007, Red Hat Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ +import glob +import os +import sys +import logging + +import yum +import rpmUtils +import pykickstart.parser + +import urlparse +import urllib2 as u2 +import tempfile +import shutil +import subprocess + +from micng.utils.errors import * +from micng.utils.fs_related import * +from micng.imager.BaseImageCreator import ImageCreator + +class MyYumRepository(yum.yumRepo.YumRepository): + def __init__(self, repoid): + yum.yumRepo.YumRepository.__init__(self, repoid) + self.sslverify = False + + def _setupGrab(self): + self.sslverify = False + yum.yumRepo.YumRepository._setupGrab(self) + + def __del__(self): + pass + +class Yum(yum.YumBase): + def __init__(self, creator = None, recording_pkgs=None): + if not isinstance(creator, ImageCreator): + raise CreatorError("Invalid argument: creator") + yum.YumBase.__init__(self) + + self.creator = creator + + if self.creator.target_arch: + if rpmUtils.arch.arches.has_key(self.creator.target_arch): + self.arch.setup_arch(self.creator.target_arch) + else: + raise CreatorError("Invalid target arch: %s" % self.creator.target_arch) + + self.__recording_pkgs = recording_pkgs + self.__pkgs_content = {} + + def doFileLogSetup(self, uid, logfile): + # don't do the file log for the livecd as it can lead to open fds + # being left and an inability to clean up after ourself + pass + + def close(self): + try: + os.unlink(self.conf.installroot + "/yum.conf") + except: + pass + self.closeRpmDB() + yum.YumBase.close(self) + self._delRepos() + self._delSacks() + + if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"): + for i in range(3, os.sysconf("SC_OPEN_MAX")): + try: + os.close(i) + except: + pass + + def __del__(self): + pass + + def _writeConf(self, confpath, installroot): + conf = "[main]\n" + conf += "installroot=%s\n" % installroot + conf += "cachedir=/var/cache/yum\n" + conf += "plugins=0\n" + conf += "reposdir=\n" + conf += "failovermethod=priority\n" + conf += "http_caching=packages\n" + conf += 
"sslverify=0\n" + + f = file(confpath, "w+") + f.write(conf) + f.close() + + os.chmod(confpath, 0644) + + def _cleanupRpmdbLocks(self, installroot): + # cleans up temporary files left by bdb so that differing + # versions of rpm don't cause problems + for f in glob.glob(installroot + "/var/lib/rpm/__db*"): + os.unlink(f) + + def setup(self, confpath, installroot): + self._writeConf(confpath, installroot) + self._cleanupRpmdbLocks(installroot) + self.doConfigSetup(fn = confpath, root = installroot) + self.conf.cache = 0 + self.doTsSetup() + self.doRpmDBSetup() + self.doRepoSetup() + self.doSackSetup() + + def selectPackage(self, pkg): + """Select a given package. Can be specified with name.arch or name*""" + try: + self.install(pattern = pkg) + return None + except yum.Errors.InstallError, e: + return e + except yum.Errors.RepoError, e: + raise CreatorError("Unable to download from repo : %s" % (e,)) + except yum.Errors.YumBaseError, e: + raise CreatorError("Unable to install: %s" % (e,)) + + def deselectPackage(self, pkg): + """Deselect package. Can be specified as name.arch or name*""" + sp = pkg.rsplit(".", 2) + txmbrs = [] + if len(sp) == 2: + txmbrs = self.tsInfo.matchNaevr(name=sp[0], arch=sp[1]) + + if len(txmbrs) == 0: + exact, match, unmatch = yum.packages.parsePackages(self.pkgSack.returnPackages(), [pkg], casematch=1) + for p in exact + match: + txmbrs.append(p) + + if len(txmbrs) > 0: + for x in txmbrs: + self.tsInfo.remove(x.pkgtup) + # we also need to remove from the conditionals + # dict so that things don't get pulled back in as a result + # of them. yes, this is ugly. conditionals should die. 
+ for req, pkgs in self.tsInfo.conditionals.iteritems(): + if x in pkgs: + pkgs.remove(x) + self.tsInfo.conditionals[req] = pkgs + else: + logging.warn("No such package %s to remove" %(pkg,)) + + def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT): + try: + yum.YumBase.selectGroup(self, grp) + if include == pykickstart.parser.GROUP_REQUIRED: + map(lambda p: self.deselectPackage(p), grp.default_packages.keys()) + elif include == pykickstart.parser.GROUP_ALL: + map(lambda p: self.selectPackage(p), grp.optional_packages.keys()) + return None + except (yum.Errors.InstallError, yum.Errors.GroupsError), e: + return e + except yum.Errors.RepoError, e: + raise CreatorError("Unable to download from repo : %s" % (e,)) + except yum.Errors.YumBaseError, e: + raise CreatorError("Unable to install: %s" % (e,)) + + def __checkAndDownloadURL(self, u2opener, url, savepath): + try: + if u2opener: + f = u2opener.open(url) + else: + f = u2.urlopen(url) + except u2.HTTPError, httperror: + if httperror.code in (404, 503): + return None + else: + raise CreatorError(httperror) + except OSError, oserr: + if oserr.errno == 2: + return None + else: + raise CreatorError(oserr) + except IOError, oserr: + if hasattr(oserr, "reason") and oserr.reason.errno == 2: + return None + else: + raise CreatorError(oserr) + except u2.URLError, err: + raise CreatorError(err) + + # save to file + licf = open(savepath, "w") + licf.write(f.read()) + licf.close() + f.close() + + return savepath + + def __pagerFile(self, savepath): + if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'): + pagers = ('w3m', 'links', 'lynx', 'less', 'more') + else: + pagers = ('less', 'more') + + file_showed = None + for pager in pagers: + try: + subprocess.call([pager, savepath]) + except OSError: + continue + else: + file_showed = True + break + if not file_showed: + f = open(savepath) + print f.read() + f.close() + raw_input('press <ENTER> to continue...') + + def checkRepositoryEULA(self, name, 
repo): + """ This function is to check the LICENSE file if provided. """ + + # when proxy needed, make urllib2 follow it + proxy = repo.proxy + proxy_username = repo.proxy_username + proxy_password = repo.proxy_password + + handlers = [] + auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm()) + u2opener = None + if proxy: + if proxy_username: + proxy_netloc = urlparse.urlsplit(proxy).netloc + if proxy_password: + proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc) + else: + proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc) + else: + proxy_url = proxy + + proxy_support = u2.ProxyHandler({'http': proxy_url, + 'ftp': proxy_url}) + handlers.append(proxy_support) + + # download all remote files to one temp dir + baseurl = None + repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic') + + for url in repo.baseurl: + if not url.endswith('/'): + url += '/' + tmphandlers = handlers + (scheme, host, path, parm, query, frag) = urlparse.urlparse(url) + if scheme not in ("http", "https", "ftp", "ftps", "file"): + raise CreatorError("Error: invalid url %s" % url) + if '@' in host: + try: + user_pass, host = host.split('@', 1) + if ':' in user_pass: + user, password = user_pass.split(':', 1) + except ValueError, e: + raise CreatorError('Bad URL: %s' % url) + print "adding HTTP auth: %s, %s" %(user, password) + auth_handler.add_password(None, host, user, password) + tmphandlers.append(auth_handler) + url = scheme + "://" + host + path + parm + query + frag + if len(tmphandlers) != 0: + u2opener = u2.build_opener(*tmphandlers) + # try to download + repo_eula_url = urlparse.urljoin(url, "LICENSE.txt") + repo_eula_path = self.__checkAndDownloadURL( + u2opener, + repo_eula_url, + os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt')) + if repo_eula_path: + # found + baseurl = url + break + + if not baseurl: + return True + + # show the license file + print 'For the software packages in this yum repo:' + print ' %s: %s' % (name, 
baseurl) + print 'There is an "End User License Agreement" file that need to be checked.' + print 'Please read the terms and conditions outlined in it and answer the followed qustions.' + raw_input('press <ENTER> to continue...') + + self.__pagerFile(repo_eula_path) + + # Asking for the "Accept/Decline" + accept = True + while accept: + input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ') + if input_accept.upper() in ('YES', 'Y'): + break + elif input_accept.upper() in ('NO', 'N'): + accept = None + print 'Will not install pkgs from this repo.' + + if not accept: + #cleanup + shutil.rmtree(repo_lic_dir) + return None + + # try to find support_info.html for extra infomation + repo_info_url = urlparse.urljoin(baseurl, "support_info.html") + repo_info_path = self.__checkAndDownloadURL( + u2opener, + repo_info_url, + os.path.join(repo_lic_dir, repo.id + '_support_info.html')) + if repo_info_path: + print 'There is one more file in the repo for additional support information, please read it' + raw_input('press <ENTER> to continue...') + self.__pagerFile(repo_info_path) + + #cleanup + shutil.rmtree(repo_lic_dir) + return True + + def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None): + def _varSubstitute(option): + # takes a variable and substitutes like yum configs do + option = option.replace("$basearch", rpmUtils.arch.getBaseArch()) + option = option.replace("$arch", rpmUtils.arch.getCanonArch()) + return option + + repo = MyYumRepository(name) + repo.sslverify = False + + """Set proxy""" + repo.proxy = proxy + repo.proxy_username = proxy_username + repo.proxy_password = proxy_password + + if url: + repo.baseurl.append(_varSubstitute(url)) + + # check LICENSE files + if not self.checkRepositoryEULA(name, repo): + return None + + if mirrorlist: + repo.mirrorlist = _varSubstitute(mirrorlist) + conf = 
        yum.config.RepoConf()
        # Copy every default option from a fresh RepoConf onto the repo,
        # unless the repo already carries a value for it.
        for k, v in conf.iteritems():
            if v or not hasattr(repo, k):
                repo.setAttribute(k, v)
        repo.basecachedir = self.conf.cachedir
        repo.failovermethod = "priority"
        repo.metadata_expire = 0
        # Enable gpg check for verifying corrupt packages
        repo.gpgcheck = 1
        repo.enable()
        repo.setup(0)
        repo.setCallback(TextProgress())
        self.repos.add(repo)
        return repo

    def installHasFile(self, file):
        # Return True when any package queued for install/update
        # ("i"/"u" transaction states) provides the given file path.
        provides_pkg = self.whatProvides(file, None, None)
        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))
        for p in dlpkgs:
            for q in provides_pkg:
                if (p == q):
                    return True
        return False

    def runInstall(self, checksize = 0):
        # Build the yum transaction, verify/download the selected packages
        # and run the RPM transaction inside the install root.
        #   checksize: optional size budget in bytes; raise if the summed
        #              package size exceeds it.
        # Raises CreatorError on any repo/transaction failure.
        os.environ["HOME"] = "/"
        try:
            (res, resmsg) = self.buildTransaction()
        except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo : %s" %(e,))
        # yum returns 2 when the transaction was built successfully
        if res != 2:
            raise CreatorError("Failed to build transaction : %s" % str.join("\n", resmsg))

        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))

        # record the total size of installed pkgs
        pkgs_total_size = sum(map(lambda x: int(x.size), dlpkgs))

        # check needed size before actually download and install
        if checksize and pkgs_total_size > checksize:
            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")

        if self.__recording_pkgs:
            # record all pkg and the content
            for pkg in dlpkgs:
                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name, pkg.printVer(), pkg.arch)
                self.__pkgs_content[pkg_long_name] = pkg.files

        total_count = len(dlpkgs)
        cached_count = 0
        print "Checking packages cache and packages integrity..."
        for po in dlpkgs:
            local = po.localPkg()
            if not os.path.exists(local):
                continue
            if not self.verifyPkg(local, po, False):
                print "Package %s is damaged: %s" % (os.path.basename(local), local)
            else:
                cached_count +=1
        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
        try:
            self.downloadPkgs(dlpkgs)
            # FIXME: sigcheck?

            self.initActionTs()
            self.populateTs(keepold=0)
            deps = self.ts.check()
            if len(deps) != 0:
                """ This isn't fatal, Ubuntu has this issue but it is ok. """
                print deps
                logging.warn("Dependency check failed!")
            rc = self.ts.order()
            if rc != 0:
                raise CreatorError("ordering packages for installation failed!")

            # FIXME: callback should be refactored a little in yum
            sys.path.append('/usr/share/yum-cli')
            import callback
            cb = callback.RPMInstallCallback()
            cb.tsInfo = self.tsInfo
            cb.filelog = False
            ret = self.runTransaction(cb)
            print ""
            self._cleanupRpmdbLocks(self.conf.installroot)
            return ret
        except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo : %s" % (e,))
        except yum.Errors.YumBaseError, e:
            raise CreatorError("Unable to install: %s" % (e,))

    def getAllContent(self):
        # Mapping of "<name>-<version>.<arch>.rpm" -> file list, filled by
        # runInstall() when package recording is enabled.
        return self.__pkgs_content

_pkgmgr = ["yum", Yum]
diff --git a/micng/utils/pkgmanagers/zypppkgmgr.py b/micng/utils/pkgmanagers/zypppkgmgr.py new file mode 100644 index 0000000..49bc88f --- /dev/null +++ b/micng/utils/pkgmanagers/zypppkgmgr.py @@ -0,0 +1,752 @@
#!/usr/bin/python

import os
import sys
import glob
import re
import zypp
import rpm
import shutil
import tempfile
import urlparse
import urllib2 as u2
from micng.utils.errors import *
from micng.imager.BaseImageCreator import ImageCreator
import pykickstart.parser
from micng.utils.fs_related import *
from micng.utils.misc import *
from micng.utils.rpmmisc import *
= None + self.baseurl = [] + self.mirrorlist = None + self.proxy = None + self.proxy_username = None + self.proxy_password = None + self.includepkgs = None + self.includepkgs = None + self.exclude = None + + self.enabled = True + self.autorefresh = True + self.keeppackages = True + +class RepoError(CreatorError): + pass + +class RpmError(CreatorError): + pass + +class Zypp: + def __init__(self, creator = None, recording_pkgs=None): + if not isinstance(creator, ImageCreator): + raise CreatorError("Invalid argument: creator") + + self.__recording_pkgs = recording_pkgs + self.__pkgs_content = {} + self.creator = creator + self.repos = [] + self.packages = [] + self.patterns = [] + self.localpkgs = {} + self.repo_manager = None + self.repo_manager_options = None + self.Z = None + self.ts = None + self.probFilterFlags = [] + self.bin_rpm = find_binary_path("rpm") + self.incpkgs = [] + self.excpkgs = [] + + def doFileLogSetup(self, uid, logfile): + # don't do the file log for the livecd as it can lead to open fds + # being left and an inability to clean up after ourself + pass + + def closeRpmDB(self): + pass + + def close(self): + try: + os.unlink(self.installroot + "/yum.conf") + except: + pass + self.closeRpmDB() + if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"): + for i in range(3, os.sysconf("SC_OPEN_MAX")): + try: + os.close(i) + except: + pass + if self.ts: + self.ts.closeDB() + self.ts = None + + def __del__(self): + self.close() + + def _writeConf(self, confpath, installroot): + conf = "[main]\n" + conf += "installroot=%s\n" % installroot + conf += "cachedir=/var/cache/yum\n" + conf += "plugins=0\n" + conf += "reposdir=\n" + conf += "failovermethod=priority\n" + conf += "http_caching=packages\n" + + f = file(confpath, "w+") + f.write(conf) + f.close() + + os.chmod(confpath, 0644) + + def _cleanupRpmdbLocks(self, installroot): + # cleans up temporary files left by bdb so that differing + # versions of rpm don't cause 
problems + for f in glob.glob(installroot + "/var/lib/rpm/__db*"): + os.unlink(f) + + def setup(self, confpath, installroot): + self._writeConf(confpath, installroot) + self._cleanupRpmdbLocks(installroot) + self.installroot = installroot + + def selectPackage(self, pkg): + """ Select a given package or package pattern, can be specified with name.arch or name* or *name """ + if not self.Z: + self.__initialize_zypp() + + found = False + startx = pkg.startswith("*") + endx = pkg.endswith("*") + ispattern = startx or endx + sp = pkg.rsplit(".", 2) + for item in self.Z.pool(): + kind = "%s" % item.kind() + if kind == "package": + name = "%s" % item.name() + if not ispattern: + if name in self.incpkgs or self.excpkgs: + found = True + break + if len(sp) == 2: + arch = "%s" % item.arch() + if name == sp[0] and arch == sp[1]: + found = True + if name not in self.packages: + self.packages.append(name) + item.status().setToBeInstalled (zypp.ResStatus.USER) + break + else: + if name == sp[0]: + found = True + if name not in self.packages: + self.packages.append(name) + item.status().setToBeInstalled (zypp.ResStatus.USER) + break + else: + if name in self.incpkgs or self.excpkgs: + found = True + continue + if startx and name.endswith(sp[0][1:]): + found = True + if name not in self.packages: + self.packages.append(name) + item.status().setToBeInstalled (zypp.ResStatus.USER) + + if endx and name.startswith(sp[0][:-1]): + found = True + if name not in self.packages: + self.packages.append(name) + item.status().setToBeInstalled (zypp.ResStatus.USER) + if found: + return None + else: + e = CreatorError("Unable to find package: %s" % (pkg,)) + return e + + def deselectPackage(self, pkg): + """Deselect package. 
        Can be specified as name.arch or name*"""

        if not self.Z:
            self.__initialize_zypp()

        startx = pkg.startswith("*")
        endx = pkg.endswith("*")
        ispattern = startx or endx
        sp = pkg.rsplit(".", 2)
        for item in self.Z.pool():
            kind = "%s" % item.kind()
            if kind == "package":
                name = "%s" % item.name()
                if not ispattern:
                    if len(sp) == 2:
                        # exact "name.arch" match
                        arch = "%s" % item.arch()
                        if name == sp[0] and arch == sp[1]:
                            if item.status().isToBeInstalled():
                                item.status().resetTransact(zypp.ResStatus.USER)
                            if name in self.packages:
                                self.packages.remove(name)
                            break
                    else:
                        # plain name match
                        if name == sp[0]:
                            if item.status().isToBeInstalled():
                                item.status().resetTransact(zypp.ResStatus.USER)
                            if name in self.packages:
                                self.packages.remove(name)
                            break
                else:
                    # wildcard: deselect every matching package (no break)
                    if startx and name.endswith(sp[0][1:]):
                        if item.status().isToBeInstalled():
                            item.status().resetTransact(zypp.ResStatus.USER)
                        if name in self.packages:
                            self.packages.remove(name)

                    if endx and name.startswith(sp[0][:-1]):
                        if item.status().isToBeInstalled():
                            item.status().resetTransact(zypp.ResStatus.USER)
                        if name in self.packages:
                            self.packages.remove(name)

    def __selectIncpkgs(self):
        # Mark every package in self.incpkgs for install, taking it only
        # from the repository whose alias ends with "include".
        found = False
        for pkg in self.incpkgs:
            for item in self.Z.pool():
                kind = "%s" % item.kind()
                if kind == "package":
                    name = "%s" % item.name()
                    repoalias = "%s" % item.repoInfo().alias()
                    if name == pkg and repoalias.endswith("include"):
                        found = True
                        if name not in self.packages:
                            self.packages.append(name)
                        item.status().setToBeInstalled (zypp.ResStatus.USER)
                        break
        if not found:
            raise CreatorError("Unable to find package: %s" % (pkg,))

    def __selectExcpkgs(self):
        # Mark every package in self.excpkgs for install from any
        # repository EXCEPT the one whose alias ends with "exclude".
        found = False
        for pkg in self.excpkgs:
            for item in self.Z.pool():
                kind = "%s" % item.kind()
                if kind == "package":
                    name = "%s" % item.name()
                    repoalias = "%s" % item.repoInfo().alias()
                    if name == pkg and not repoalias.endswith("exclude"):
                        found = True
                        if name not in self.packages:
                            self.packages.append(name)
                        item.status().setToBeInstalled (zypp.ResStatus.USER)
                        break
        if not found:
            raise CreatorError("Unable to find package: %s" % (pkg,))


    def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT):
        # Select a zypp "pattern" whose name or summary equals grp.
        # NOTE(review): the GROUP_REQUIRED/GROUP_ALL branches below call
        # grp.default_packages / grp.optional_packages, which assumes grp
        # is a pykickstart Group object, yet grp is compared against plain
        # strings above -- confirm the expected argument type with callers.
        if not self.Z:
            self.__initialize_zypp()
        found = False
        for item in self.Z.pool():
            kind = "%s" % item.kind()
            if kind == "pattern":
                summary = "%s" % item.summary()
                name = "%s" % item.name()
                if name == grp or summary == grp:
                    found = True
                    if name not in self.patterns:
                        self.patterns.append(name)
                    item.status().setToBeInstalled (zypp.ResStatus.USER)
                    break

        if found:
            if include == pykickstart.parser.GROUP_REQUIRED:
                map(lambda p: self.deselectPackage(p), grp.default_packages.keys())
            elif include == pykickstart.parser.GROUP_ALL:
                map(lambda p: self.selectPackage(p), grp.optional_packages.keys())
            return None
        else:
            e = CreatorError("Unable to find pattern: %s" % (grp,))
            return e

    def __checkAndDownloadURL(self, u2opener, url, savepath):
        # Download url to savepath and return savepath, or None when the
        # remote/local file does not exist (HTTP 404/503, ENOENT).
        try:
            if u2opener:
                f = u2opener.open(url)
            else:
                f = u2.urlopen(url)
        except u2.HTTPError, httperror:
            if httperror.code in (404, 503):
                return None
            else:
                raise CreatorError(httperror)
        except OSError, oserr:
            # file:// URLs raise OSError for missing paths
            if oserr.errno == 2:
                return None
            else:
                raise CreatorError(oserr)
        except IOError, oserr:
            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
                return None
            else:
                raise CreatorError(oserr)
        except u2.URLError, err:
            raise CreatorError(err)

        # save to file
        licf = open(savepath, "w")
        licf.write(f.read())
        licf.close()
        f.close()

        return savepath

    def __pagerFile(self, savepath):
        # Show a downloaded license/info file with the first available
        # pager; fall back to dumping it on stdout.
        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
        else:
            pagers = ('less', 'more')

        file_showed = None
        for pager in pagers:
            try:
                # NOTE(review): `subprocess` is not imported in this module;
                # it is only available via the star import from
                # micng.utils.rpmmisc -- confirm.
                subprocess.call([pager, savepath])
            except OSError:
                continue
            else:
                file_showed = True
                break
        if not file_showed:
            f = open(savepath)
            print f.read()
            f.close()
            raw_input('press <ENTER> to continue...')

    def checkRepositoryEULA(self, name, repo):
        """ This function is to check the LICENSE file if provided. """

        # when proxy needed, make urllib2 follow it
        proxy = repo.proxy
        proxy_username = repo.proxy_username
        proxy_password = repo.proxy_password

        handlers = []
        auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
        u2opener = None
        if proxy:
            if proxy_username:
                proxy_netloc = urlparse.urlsplit(proxy).netloc
                if proxy_password:
                    proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
                else:
                    proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
            else:
                proxy_url = proxy

            proxy_support = u2.ProxyHandler({'http': proxy_url,
                                             'ftp': proxy_url})
            handlers.append(proxy_support)

        # download all remote files to one temp dir
        baseurl = None
        repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')

        for url in repo.baseurl:
            if not url.endswith('/'):
                url += '/'
            # NOTE(review): plain assignment aliases the shared handler
            # list, so auth handlers appended below accumulate across
            # baseurl iterations -- confirm whether a copy was intended.
            tmphandlers = handlers
            (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
            if scheme not in ("http", "https", "ftp", "ftps", "file"):
                raise CreatorError("Error: invalid url %s" % url)
            if '@' in host:
                # strip inline credentials from the URL and register them
                # with the basic-auth handler instead
                try:
                    user_pass, host = host.split('@', 1)
                    if ':' in user_pass:
                        user, password = user_pass.split(':', 1)
                except ValueError, e:
                    raise CreatorError('Bad URL: %s' % url)
                # NOTE(review): `user`/`password` stay unbound when the
                # credential part has no ':' -- potential NameError here.
                print "adding HTTP auth: %s, %s" %(user, password)
                auth_handler.add_password(None, host, user, password)
                tmphandlers.append(auth_handler)
                url = scheme + "://" + host + path + parm + query + frag

            if len(tmphandlers) != 0:
                u2opener = u2.build_opener(*tmphandlers)

            # try to download
            repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
            repo_eula_path = self.__checkAndDownloadURL(
                                    u2opener,
                                    repo_eula_url,
                                    os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
            if repo_eula_path:
                # found
                baseurl = url
                break

        if not baseurl:
            # no EULA anywhere: nothing to confirm
            return True

        # show the license file
        print 'For the software packages in this yum repo:'
        print '     %s: %s' % (name, baseurl)
        print 'There is an "End User License Agreement" file that need to be checked.'
        print 'Please read the terms and conditions outlined in it and answer the followed qustions.'
        raw_input('press <ENTER> to continue...')

        self.__pagerFile(repo_eula_path)

        # Asking for the "Accept/Decline"
        accept = True
        while accept:
            input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ')
            if input_accept.upper() in ('YES', 'Y'):
                break
            elif input_accept.upper() in ('NO', 'N'):
                accept = None
                print 'Will not install pkgs from this repo.'

        if not accept:
            #cleanup
            shutil.rmtree(repo_lic_dir)
            return None

        # try to find support_info.html for extra infomation
        repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
        repo_info_path = self.__checkAndDownloadURL(
                                u2opener,
                                repo_info_url,
                                os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
        if repo_info_path:
            print 'There is one more file in the repo for additional support information, please read it'
            raw_input('press <ENTER> to continue...')
            self.__pagerFile(repo_info_path)

        #cleanup
        shutil.rmtree(repo_lic_dir)
        return True

    def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None):
        # Register a repository with libzypp and build its metadata cache.
        # Returns the RepositoryStub on success, None when the user
        # declined the repository EULA.
        if not self.repo_manager:
            self.__initialize_repo_manager()

        repo = RepositoryStub()
        repo.name = name
        repo.id = name
        repo.proxy = proxy
        repo.proxy_username = proxy_username
        repo.proxy_password = proxy_password
        repo.baseurl.append(url)
        repo_alias = repo.id
        if inc:
            # packages that must come only from this repo
            repo_alias = name + "include"
            self.incpkgs = inc
        if exc:
            # packages that must never come from this repo
            repo_alias = name + "exclude"
            self.excpkgs = exc

        # check LICENSE files
        if not self.checkRepositoryEULA(name, repo):
            return None

        if mirrorlist:
            repo.mirrorlist = mirrorlist

        # Enable gpg check for verifying corrupt packages
        repo.gpgcheck = 1
        self.repos.append(repo)


        try:
            repo_info = zypp.RepoInfo()
            repo_info.setAlias(repo_alias)
            repo_info.setName(repo.name)
            repo_info.setEnabled(repo.enabled)
            repo_info.setAutorefresh(repo.autorefresh)
            repo_info.setKeepPackages(repo.keeppackages)
            repo_info.addBaseUrl(zypp.Url(repo.baseurl[0]))
            self.repo_manager.addRepository(repo_info)
            self.__build_repo_cache(name)
        except RuntimeError, e:
            raise CreatorError("%s" % (e,))

        return repo

    def installHasFile(self, file):
        # zypp backend does not support file-provides lookup
        return False

    def runInstall(self, checksize = 0):
        # Resolve, download and install everything previously selected.
        #   checksize: optional size budget (bytes) for the root partition.
        if self.incpkgs:
            self.__selectIncpkgs()
        if self.excpkgs:
            self.__selectExcpkgs()

        os.environ["HOME"] = "/"
        self.buildTransaction()

        todo = zypp.GetResolvablesToInsDel(self.Z.pool())
        installed_pkgs = todo._toInstall
        dlpkgs = []
        for item in installed_pkgs:
            # patterns are meta-resolvables, not downloadable packages
            if not zypp.isKindPattern(item):
                dlpkgs.append(item)

        # record the total size of installed pkgs
        pkgs_total_size = sum(map(lambda x: int(x.installSize()), dlpkgs))

        # check needed size before actually download and install
        if checksize and pkgs_total_size > checksize:
            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")

        if self.__recording_pkgs:
            # record all pkg and the content
            for pkg in dlpkgs:
                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name(), pkg.edition(), pkg.arch())
                self.__pkgs_content[pkg_long_name] = {} #TBD: to get file list

        total_count = len(dlpkgs)
        cached_count = 0
        localpkgs = self.localpkgs.keys()
        print "Checking packages cache and packages integrity..."
        for po in dlpkgs:
            """ Check if it is cached locally """
            if po.name() in localpkgs:
                cached_count += 1
            else:
                local = self.getLocalPkgPath(po)
                if os.path.exists(local):
                    # drop damaged cache entries so they get re-downloaded
                    if self.checkPkg(local) != 0:
                        os.unlink(local)
                    else:
                        cached_count += 1
        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
        try:
            print "downloading packages..."
            self.downloadPkgs(dlpkgs)
            self.installPkgs(dlpkgs)

        except RepoError, e:
            raise CreatorError("Unable to download from repo : %s" % (e,))
        except RpmError, e:
            raise CreatorError("Unable to install: %s" % (e,))

    def getAllContent(self):
        # Mapping of "<name>-<edition>.<arch>.rpm" -> content, filled by
        # runInstall() when package recording is enabled.
        return self.__pkgs_content

    def __initialize_repo_manager(self):
        if self.repo_manager:
            return

        """ Clean up repo metadata """
        shutil.rmtree(self.creator.cachedir + "/var", ignore_errors = True)
        shutil.rmtree(self.creator.cachedir + "/etc", ignore_errors = True)
        shutil.rmtree(self.creator.cachedir + "/raw", ignore_errors = True)
        shutil.rmtree(self.creator.cachedir + "/solv", ignore_errors = True)

        # accept everything: image builds run unattended, so unsigned or
        # unknown-key repos must not block the build
        zypp.KeyRing.setDefaultAccept( zypp.KeyRing.ACCEPT_UNSIGNED_FILE
                                     | zypp.KeyRing.ACCEPT_VERIFICATION_FAILED
                                     | zypp.KeyRing.ACCEPT_UNKNOWNKEY
                                     | zypp.KeyRing.TRUST_KEY_TEMPORARILY
                                     )
        # keep all repo caches under the creator's cache dir, not the host's
        self.repo_manager_options = zypp.RepoManagerOptions(zypp.Pathname(self.creator._instroot))
        self.repo_manager_options.knownReposPath = zypp.Pathname(self.creator.cachedir + "/etc/zypp/repos.d")
        self.repo_manager_options.repoCachePath = zypp.Pathname(self.creator.cachedir + "/var/cache/zypp")
        self.repo_manager_options.repoRawCachePath = zypp.Pathname(self.creator.cachedir + "/raw")
        self.repo_manager_options.repoSolvCachePath = zypp.Pathname(self.creator.cachedir + "/solv")
        self.repo_manager_options.repoPackagesCachePath = zypp.Pathname(self.creator.cachedir + "/packages")

        self.repo_manager = zypp.RepoManager(self.repo_manager_options)


    def __build_repo_cache(self, name):
        # Build the solv cache for the enabled repository called `name`,
        # unless it is already cached.
        repos = self.repo_manager.knownRepositories()
        for repo in repos:
            if not repo.enabled():
                continue
            reponame = "%s" % repo.name()
            if reponame != name:
                continue
            if self.repo_manager.isCached( repo ):
                return
            #print "Retrieving repo metadata from %s ..." % repo.url()
            self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )


    def __initialize_zypp(self):
        if self.Z:
            return

        zconfig = zypp.ZConfig_instance()

        """ Set system architecture """
        if self.creator.target_arch and self.creator.target_arch.startswith("arm"):
            arches = ["armv7l", "armv7nhl", "armv7hl"]
            if self.creator.target_arch not in arches:
                raise CreatorError("Invalid architecture: %s" % self.creator.target_arch)
            arch_map = {}
            if self.creator.target_arch == "armv7l":
                arch_map["armv7l"] = zypp.Arch_armv7l()
            elif self.creator.target_arch == "armv7nhl":
                arch_map["armv7nhl"] = zypp.Arch_armv7nhl()
            elif self.creator.target_arch == "armv7hl":
                arch_map["armv7hl"] = zypp.Arch_armv7hl()
            zconfig.setSystemArchitecture(arch_map[self.creator.target_arch])

        print "zypp architecture: %s" % zconfig.systemArchitecture()

        """ repoPackagesCachePath is corrected by this """
        self.repo_manager = zypp.RepoManager(self.repo_manager_options)
        repos = self.repo_manager.knownRepositories()
        for repo in repos:
            if not repo.enabled():
                continue
            if not self.repo_manager.isCached( repo ):
                print "Retrieving repo metadata from %s ..." % repo.url()
                self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )
            else:
                self.repo_manager.refreshMetadata(repo, zypp.RepoManager.BuildIfNeeded)
            self.repo_manager.loadFromCache( repo );

        self.Z = zypp.ZYppFactory_instance().getZYpp()
        self.Z.initializeTarget( zypp.Pathname(self.creator._instroot) )
        self.Z.target().load();


    def buildTransaction(self):
        # Run the dependency resolver; problems are only reported, the
        # caller proceeds with whatever could be resolved.
        if not self.Z.resolver().resolvePool():
            print "Problem count: %d" % len(self.Z.resolver().problems())
            for problem in self.Z.resolver().problems():
                print "Problem: %s, %s" % (problem.description().decode("utf-8"), problem.details().decode("utf-8"))

    def getLocalPkgPath(self, po):
        # Compute the cache path "<cacheroot>/<arch>/<name>-<ver>-<rel>.<arch>.rpm"
        # for a zypp package object.
        repoinfo = po.repoInfo()
        name = po.name()
        cacheroot = repoinfo.packagesPath()
        arch = po.arch()
        edition = po.edition()
        version = "%s-%s" % (edition.version(), edition.release())
        pkgpath = "%s/%s/%s-%s.%s.rpm" % (cacheroot, arch, name, version, arch)
        return pkgpath

    def installLocal(self, pkg, po=None, updateonly=False):
        # Queue a local rpm file for installation; po/updateonly are
        # accepted for API compatibility with the yum backend and ignored.
        if not self.ts:
            self.__initialize_transaction()
        pkgname = self.__get_pkg_name(pkg)
        self.localpkgs[pkgname] = pkg
        self.selectPackage(pkgname)

    def __get_pkg_name(self, pkgpath):
        # package name of a local rpm file, read from its header
        h = readRpmHeader(self.ts, pkgpath)
        return h["name"]

    def downloadPkgs(self, package_objects):
        # Fetch every package that is neither a registered local rpm nor
        # already present (and intact) in the cache directory.
        localpkgs = self.localpkgs.keys()
        for po in package_objects:
            if po.name() in localpkgs:
                continue
            filename = self.getLocalPkgPath(po)
            if os.path.exists(filename):
                # cached copy verified: skip the download
                if self.checkPkg(filename) == 0:
                    continue
            dir = os.path.dirname(filename)
            if not os.path.exists(dir):
                makedirs(dir)
            baseurl = po.repoInfo().baseUrls()[0].__str__()
            proxy = self.get_proxy(po.repoInfo())
            proxies = {}
            if proxy:
                # map the URL scheme to the proxy, as urllib expects
                proxies = {str(proxy.split(":")[0]):str(proxy)}

            location = zypp.asKindPackage(po).location()
            location = location.filename().__str__()
            if location.startswith("./"):
                location = location[2:]
            url = baseurl + "/%s" % location
            try:
                filename = myurlgrab(url, filename, proxies)
            except CreatorError, e:
                self.close()
                raise CreatorError("%s" % e)

    def installPkgs(self, package_objects):
        # Feed all downloaded/local rpms into one rpm transaction and run it.
        if not self.ts:
            self.__initialize_transaction()

        """ Set filters """
        probfilter = 0
        for flag in self.probFilterFlags:
            probfilter |= flag
        self.ts.setProbFilter(probfilter)

        localpkgs = self.localpkgs.keys()
        for po in package_objects:
            pkgname = po.name()
            if pkgname in localpkgs:
                rpmpath = self.localpkgs[pkgname]
            else:
                rpmpath = self.getLocalPkgPath(po)
            if not os.path.exists(rpmpath):
                """ Maybe it is a local repo """
                baseurl = po.repoInfo().baseUrls()[0].__str__()
                baseurl = baseurl.strip()
                if baseurl.startswith("file:/"):
                    rpmpath = baseurl[5:] + "/%s/%s" % (po.arch(), os.path.basename(rpmpath))
            if not os.path.exists(rpmpath):
                raise RpmError("Error: %s doesn't exist" % rpmpath)
            h = readRpmHeader(self.ts, rpmpath)
            self.ts.addInstall(h, rpmpath, 'u')

        unresolved_dependencies = self.ts.check()
        if not unresolved_dependencies:
            self.ts.order()
            cb = RPMInstallCallback(self.ts)
            self.ts.run(cb.callback, '')
            self.ts.closeDB()
            self.ts = None
        else:
            print unresolved_dependencies
            raise RepoError("Error: Unresolved dependencies, transaction failed.")

    def __initialize_transaction(self):
        if not self.ts:
            self.ts = rpm.TransactionSet(self.creator._instroot)
            # Set to not verify DSA signatures.
            self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)

    def checkPkg(self, pkg):
        # Return 0 when pkg exists and passes rpm integrity checking,
        # non-zero otherwise.
        ret = 1
        if not os.path.exists(pkg):
            return ret
        ret = checkRpmIntegrity(self.bin_rpm, pkg)
        if ret != 0:
            print "Package %s is damaged: %s" % (os.path.basename(pkg), pkg)
        return ret

    def zypp_install(self):
        # Alternative install path: let libzypp itself commit the pool.
        policy = zypp.ZYppCommitPolicy()
        policy.downloadMode(zypp.DownloadInAdvance)
        policy.dryRun( False )
        policy.syncPoolAfterCommit( False )
        result = self.Z.commit( policy )
        print result

    def _add_prob_flags(self, *flags):
        # accumulate rpm problem-filter flags (deduplicated)
        for flag in flags:
            if flag not in self.probFilterFlags:
                self.probFilterFlags.append(flag)

    def get_proxy(self, repoinfo):
        # Return the proxy recorded for this repository, if any.
        proxy = None
        reponame = "%s" % repoinfo.name()
        for repo in self.repos:
            if repo.name == reponame:
                proxy = repo.proxy
                break

        if proxy:
            return proxy
        else:
            # NOTE(review): this bare get_proxy call resolves to a
            # module-level helper (from the star imports above), not to
            # this method -- confirm.
            repourl = repoinfo.baseUrls()[0].__str__()
            return get_proxy(repourl)

_pkgmgr = ["zypp", Zypp]

diff --git a/micng/utils/rpmmisc.py b/micng/utils/rpmmisc.py new file mode 100644 index 0000000..e1030c2 --- /dev/null +++ b/micng/utils/rpmmisc.py @@ -0,0 +1,406 @@
import rpm, os, sys, re
import locale
import subprocess
import logging

class RPMInstallCallback:
    """
    command line callback class for callbacks from the RPM library.
+ """ + + def __init__(self, ts, output=1): + self.output = output + self.callbackfilehandles = {} + self.total_actions = 0 + self.total_installed = 0 + self.installed_pkg_names = [] + self.total_removed = 0 + self.mark = "+" + self.marks = 40 + self.lastmsg = None + self.tsInfo = None # this needs to be set for anything else to work + self.ts = ts + self.logString = [] + + def _dopkgtup(self, hdr): + tmpepoch = hdr['epoch'] + if tmpepoch is None: epoch = '0' + else: epoch = str(tmpepoch) + + return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release']) + + def _makeHandle(self, hdr): + handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'], + hdr['release'], hdr['arch']) + + return handle + + def _localprint(self, msg): + if self.output: + print msg + + def _makefmt(self, percent, progress = True): + l = len(str(self.total_actions)) + size = "%s.%s" % (l, l) + fmt_done = "[%" + size + "s/%" + size + "s]" + done = fmt_done % (self.total_installed + self.total_removed, + self.total_actions) + marks = self.marks - (2 * l) + width = "%s.%s" % (marks, marks) + fmt_bar = "%-" + width + "s" + if progress: + bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), ) + fmt = "\r %-10.10s: " + bar + " " + done + else: + bar = fmt_bar % (self.mark * marks, ) + fmt = " %-10.10s: " + bar + " " + done + return fmt + + def _logPkgString(self, hdr): + """return nice representation of the package for the log""" + (n,a,e,v,r) = self._dopkgtup(hdr) + if e == '0': + pkg = '%s.%s %s-%s' % (n, a, v, r) + else: + pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r) + + return pkg + + def callback(self, what, bytes, total, h, user): + if what == rpm.RPMCALLBACK_TRANS_START: + if bytes == 6: + self.total_actions = total + + elif what == rpm.RPMCALLBACK_TRANS_PROGRESS: + pass + + elif what == rpm.RPMCALLBACK_TRANS_STOP: + pass + + elif what == rpm.RPMCALLBACK_INST_OPEN_FILE: + self.lastmsg = None + hdr = None + if h is not None: + rpmloc = h + hdr = 
readRpmHeader(self.ts, h) + handle = self._makeHandle(hdr) + fd = os.open(rpmloc, os.O_RDONLY) + self.callbackfilehandles[handle]=fd + self.total_installed += 1 + self.installed_pkg_names.append(hdr['name']) + return fd + else: + self._localprint("No header - huh?") + + elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE: + hdr = None + if h is not None: + rpmloc = h + hdr = readRpmHeader(self.ts, h) + handle = self._makeHandle(hdr) + os.close(self.callbackfilehandles[handle]) + fd = 0 + + # log stuff + #pkgtup = self._dopkgtup(hdr) + self.logString.append(self._logPkgString(hdr)) + + + elif what == rpm.RPMCALLBACK_INST_PROGRESS: + if h is not None: + percent = (self.total_installed*100L)/self.total_actions + if self.output and (sys.stdout.isatty() or self.total_installed == self.total_actions): + fmt = self._makefmt(percent) + msg = fmt % ("Installing") + if msg != self.lastmsg: + sys.stdout.write(msg) + sys.stdout.flush() + self.lastmsg = msg + if self.total_installed == self.total_actions: + sys.stdout.write("\n") + logging.info('\n'.join(self.logString)) + + elif what == rpm.RPMCALLBACK_UNINST_START: + pass + + elif what == rpm.RPMCALLBACK_UNINST_PROGRESS: + pass + + elif what == rpm.RPMCALLBACK_UNINST_STOP: + self.total_removed += 1 + + elif what == rpm.RPMCALLBACK_REPACKAGE_START: + pass + elif what == rpm.RPMCALLBACK_REPACKAGE_STOP: + pass + elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS: + pass + +def readRpmHeader(ts, filename): + """ Read an rpm header. 
def readRpmHeader(ts, filename):
    """Return the rpm header of *filename*, read via transaction set *ts*."""
    fd = os.open(filename, os.O_RDONLY)
    h = ts.hdrFromFdno(fd)
    os.close(fd)
    return h

def splitFilename(filename):
    """
    Pass in a standard style rpm fullname

    Return a name, version, release, epoch, arch, e.g.::
        foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
        1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
    """
    if filename[-4:] == '.rpm':
        filename = filename[:-4]

    archIndex = filename.rfind('.')
    arch = filename[archIndex+1:]

    relIndex = filename[:archIndex].rfind('-')
    rel = filename[relIndex+1:archIndex]

    verIndex = filename[:relIndex].rfind('-')
    ver = filename[verIndex+1:relIndex]

    epochIndex = filename.find(':')
    if epochIndex == -1:
        epoch = ''
    else:
        epoch = filename[:epochIndex]

    # works for both cases: epochIndex is -1 when there is no epoch, so
    # the name starts at index 0
    name = filename[epochIndex + 1:verIndex]
    return name, ver, rel, epoch, arch

def _cpuinfoLines():
    """Return the lines of /proc/cpuinfo (helper for arch canonicalization)."""
    f = open("/proc/cpuinfo", "r")
    lines = f.readlines()
    f.close()
    return lines

def getCanonX86Arch(arch):
    """Canonicalize a 32-bit x86 arch string using CPU model quirks."""
    if arch == "i586":
        # Geode CPUs report i586 but have their own rpm arch
        for line in _cpuinfoLines():
            if line.startswith("model name") and line.find("Geode(TM)") != -1:
                return "geode"
        return arch
    # only athlon vs i686 isn't handled with uname currently
    if arch != "i686":
        return arch

    # if we're i686 and AuthenticAMD, then we should be an athlon
    for line in _cpuinfoLines():
        if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
            return "athlon"
        # i686 doesn't guarantee cmov, but we depend on it
        elif line.startswith("flags") and line.find("cmov") == -1:
            return "i586"

    return arch

def getCanonX86_64Arch(arch):
    """Canonicalize an x86_64 arch string by CPU vendor (amd64/ia32e)."""
    if arch != "x86_64":
        return arch

    vendor = None
    for line in _cpuinfoLines():
        if line.startswith("vendor_id"):
            vendor = line.split(':')[1]
            break
    if vendor is None:
        return arch

    if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
        return "amd64"
    if vendor.find("GenuineIntel") != -1:
        return "ia32e"
    return arch

def getCanonArch():
    """Return the canonical rpm architecture of the running machine."""
    arch = os.uname()[4]

    if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
        return getCanonX86Arch(arch)

    if arch == "x86_64":
        return getCanonX86_64Arch(arch)

    return arch

# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = { "x86_64": ( "athlon", "x86_64", "athlon" ),
                   "sparc64v": ( "sparc", "sparcv9v", "sparc64v" ),
                   "sparc64": ( "sparc", "sparcv9", "sparc64" ),
                   "ppc64": ( "ppc", "ppc", "ppc64" ),
                   "s390x": ( "s390", "s390x", "s390" ),
                   }

# Fallback chain: each arch maps to the next more generic compatible arch,
# terminating at "noarch".
arches = {
    # ia32
    "athlon": "i686",
    "i686": "i586",
    "geode": "i586",
    "i586": "i486",
    "i486": "i386",
    "i386": "noarch",

    # amd64
    "x86_64": "athlon",
    "amd64": "x86_64",
    "ia32e": "x86_64",

    # ppc
    "ppc64pseries": "ppc64",
    "ppc64iseries": "ppc64",
    "ppc64": "ppc",
    "ppc": "noarch",

    # s390{,x}
    "s390x": "s390",
    "s390": "noarch",

    # sparc
    # BUGFIX: removed the duplicate "sparc64v" key; the later entry
    # ("sparcv9v") always won at dict construction time, so only the dead
    # first entry was dropped -- the effective mapping is unchanged.
    "sparc64v": "sparcv9v",
    "sparc64": "sparcv9",
    "sparcv9v": "sparcv9",
    "sparcv9": "sparcv8",
    "sparcv8": "sparc",
    "sparc": "noarch",

    # alpha
    "alphaev7": "alphaev68",
    "alphaev68": "alphaev67",
    "alphaev67": "alphaev6",
    "alphaev6": "alphapca56",
    "alphapca56": "alphaev56",
    "alphaev56": "alphaev5",
    "alphaev5": "alphaev45",
    "alphaev45": "alphaev4",
    "alphaev4": "alpha",
    "alpha": "noarch",

    # arm
    "armv7nhl": "armv7hl",
    "armv7hl": "noarch",
    "armv7l": "armv6l",
    "armv6l": "armv5tejl",
    "armv5tejl": "armv5tel",
    "armv5tel": "noarch",

    # super-h
    "sh4a": "sh4",
    "sh4": "noarch",
    "sh3": "noarch",

    #itanium
    "ia64": "noarch",
    }

def isMultiLibArch(arch=None):
    """returns true if arch is a multilib arch, false if not"""
    if arch is None:
        # BUGFIX: the original referenced an undefined module global
        # `canonArch` here (NameError); derive the canonical arch instead.
        arch = getCanonArch()

    if arch not in arches: # or we could check if it is noarch
        return 0

    if arch in multilibArches:
        return 1

    if arches[arch] in multilibArches:
        return 1

    return 0

def getBaseArch():
    """Return the base architecture for the current machine, e.g. "i386"
    for any ix86 flavour and "x86_64" for amd64/ia32e."""
    myarch = getCanonArch()
    if myarch not in arches:
        # unknown to the compat table: return it unchanged
        return myarch

    if isMultiLibArch(arch=myarch):
        if myarch in multilibArches:
            return myarch
        else:
            return arches[myarch]

    # walk the fallback chain down to (but not including) "noarch"
    basearch = myarch
    value = arches[basearch]
    while value != 'noarch':
        basearch = value
        value = arches[basearch]

    return basearch

def checkRpmIntegrity(bin_rpm, package):
    """Run `rpm --checksig --nogpg` on *package* and return its exit
    status (0 means the payload digests are intact)."""
    argv = [bin_rpm, "--checksig", "--nogpg", package]
    dev_null = os.open("/dev/null", os.O_WRONLY)
    try:
        # ensure the fd is released even if subprocess.call raises
        ret = subprocess.call(argv, stdout = dev_null, stderr = dev_null)
    finally:
        os.close(dev_null)
    return ret

def checkSig(ts, package):
    """Takes a transaction set and a package, check it's sigs,
    return 0 if they are all fine
    return 1 if the gpg key can't be found
    return 2 if the header is in someway damaged
    return 3 if the key is not trusted
    return 4 if the pkg is not gpg or pgp signed"""

    value = 0
    currentflags = ts.setVSFlags(0)
    fdno = os.open(package, os.O_RDONLY)
    try:
        hdr = ts.hdrFromFdno(fdno)
    except rpm.error as e:
        # the misspelled message is matched on purpose: some rpm versions
        # really emit "availaiable"
        if str(e) == "public key not availaiable":
            value = 1
        if str(e) == "public key not available":
            value = 1
        if str(e) == "public key not trusted":
            value = 3
        if str(e) == "error reading package header":
            value = 2
    else:
        error, siginfo = getSigInfo(hdr)
        if error == 101:
            os.close(fdno)
            del hdr
            value = 4
        else:
            del hdr

    try:
        os.close(fdno)
    except OSError as e: # if we're not opened, don't scream about it
        pass

    ts.setVSFlags(currentflags) # put things back like they were before
    return value

def getSigInfo(hdr):
    """checks signature from an hdr hand back signature information and/or
    an error code"""

    # rpm's pgpsig tags are locale sensitive; force C for stable parsing
    locale.setlocale(locale.LC_ALL, 'C')
    string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
    siginfo = hdr.sprintf(string)
    if siginfo != '(none)':
        error = 0
        sigtype, sigdate, sigid = siginfo.split(',')
    else:
        error = 101
        sigtype = 'MD5'
        sigdate = 'None'
        sigid = 'None'

    infotuple = (sigtype, sigdate, sigid)
    return error, infotuple
sigdate, sigid = siginfo.split(',') + else: + error = 101 + sigtype = 'MD5' + sigdate = 'None' + sigid = 'None' + + infotuple = (sigtype, sigdate, sigid) + return error, infotuple diff --git a/plugins/backend/yumpkgmgr.py b/plugins/backend/yumpkgmgr.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/plugins/backend/yumpkgmgr.py diff --git a/plugins/backend/zypppkgmgr.py b/plugins/backend/zypppkgmgr.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/plugins/backend/zypppkgmgr.py diff --git a/plugins/hook/_hook.py b/plugins/hook/_hook.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/plugins/hook/_hook.py diff --git a/plugins/imager/fs_plugin.py b/plugins/imager/fs_plugin.py new file mode 100644 index 0000000..f90b94a --- /dev/null +++ b/plugins/imager/fs_plugin.py @@ -0,0 +1,47 @@ +#!/usr/bin/python + +from micng.pluginbase.imager_plugin import ImagerPlugin +from micng.imager.fs import * +import micng.configmgr as configmgr +try: + import argparse +except: + import micng.utils.argparse + +class FsPlugin(ImagerPlugin): + """hello livecd + """ + @classmethod + def do_options(self, parser): + parser.add_argument('ksfile', nargs='?', help='kickstart file') + parser.add_argument('--release', help='fs options test') + + @classmethod + def do_create(self, args): + if args.release: + print "fs option release: ", args.release + if not args.ksfile: + print "please specify a kickstart file" + return +# print "ksfile", args.ksfile + self.configmgr = configmgr.getConfigMgr() + self.configmgr.setProperty('ksfile', args.ksfile) +# print "ksfile", self.configmgr.getProperty('ksfile') + self.ks = self.configmgr.getProperty('kickstart') + self.name = self.configmgr.getProperty('name') + fs = FsImageCreator(self.ks, self.name) + try: + fs.outdir = self.configmgr.getProperty('outdir') + fs.mount(None, self.configmgr.cache) + fs.install() + fs.configure(self.configmgr.repometadata) + fs.unmount() + fs.package(self.configmgr.outdir) + 
print "Finished" + except Exception, e: + print "failed to create image: %s" % e + finally: + fs.cleanup() + + +mic_plugin = ["fs", FsPlugin] diff --git a/plugins/imager/livecd_plugin.py b/plugins/imager/livecd_plugin.py new file mode 100644 index 0000000..9edddb8 --- /dev/null +++ b/plugins/imager/livecd_plugin.py @@ -0,0 +1,71 @@ +#!/usr/bin/python +from micng.pluginbase.imager_plugin import ImagerPlugin +import micng.imager as imager +import micng.configmgr as cfgmgr +import micng.utils as utils +import micng.utils.cmdln as cmdln +import os, time + +class LivecdPlugin(ImagerPlugin): + @classmethod + def do_options(self, parser): + parser.add_argument("-vid", "--volumeid", type="string", default=None, help="Specify volume id") + parser.add_argument("ksfile", help="kickstart file") + + @classmethod + def do_create(self, args): + if not args.ksfile: + print "please specify kickstart file" + return + + self.configmgr = cfgmgr.getConfigMgr() + self.configmgr.setProperty('ksfile', args.ksfile) + + fs_label = utils.kickstart.build_name( + args.ksfile, + "%s-" % self.configmgr.name, + maxlen = 32, + suffix = "%s-%s" %(os.uname()[4], time.strftime("%Y%m%d%H%M"))) + + creator = imager.livecd.LivecdImageCreator( + self.configmgr.kickstart, self.configmgr.name, fs_label) + + creator.skip_compression = False + creator.skip_minimize = False + + creator.tmpdir = self.configmgr.tmpdir + creator._alt_initrd_name = None + creator._recording_pkgs = None + creator._include_src = False + creator._local_pkgs_path = None + creator._genchecksum = False + creator.distro_name = self.configmgr.name + creator.image_format = "livecd" + + + utils.kickstart.resolve_groups(creator, self.configmgr.repometadata, False) + + imgname = creator.name + + try: + creator.check_depend_tools() + creator.mount(None, self.configmgr.cache) + creator.install() + + creator.configure(self.configmgr.repometadata) + creator.unmount() + creator.package(self.configmgr.outdir) + outimage = creator.outimage + + 
# (tail of LivecdPlugin.do_create() -- the method is defined in the
#  previous hunk: package_output/print_outimage_info, exception re-raise,
#  cleanup, final "Finished." message)

# Plugin registration consumed by micng.pluginmgr: [category key, class].
mic_plugin = ["livecd", LivecdPlugin]

# --- setup.py ---
#!/usr/bin/env python

import os, sys
from distutils.core import setup

MOD_NAME = 'micng'

version_path = 'VERSION'
if not os.path.isfile(version_path):
    print('No VERSION file in topdir, abort')
    sys.exit(1)

try:
    # first line should be the version number
    # BUGFIX: the original left both file handles open; use context
    # managers so they are closed even on error paths.
    with open(version_path) as ver_in:
        version = ver_in.readline().strip()
    if not version:
        print('VERSION file is invalid, abort')
        sys.exit(1)

    # Regenerate micng/__version__.py so the installed package knows
    # its own version.
    with open('%s/__version__.py' % MOD_NAME, 'w') as ver_file:
        ver_file.write("VERSION = \"%s\"\n" % version)
except IOError:
    # NOTE(review): if the *read* failed, "version" is undefined and the
    # setup() call below raises NameError -- presumably VERSION is always
    # expected to be readable; confirm.
    print('WARNING: Cannot write version number file')

PACKAGES = [MOD_NAME,
            MOD_NAME + '/utils',
            MOD_NAME + '/utils/kscommands',
            MOD_NAME + '/utils/pkgmanagers',
            MOD_NAME + '/imager',
            MOD_NAME + '/pluginbase',
            ]
setup(name=MOD_NAME,
      version = version,
      description = 'New MeeGo Image Creator',
      author='Jian-feng Ding',
      author_email='jian-feng.ding@intel.com',
      url='https://meego.gitorious.org/meego-developer-tools/image-creator',
      scripts=[
          'tools/micng',
          'tools/mic-image-create',
      ],
      packages = PACKAGES,
)

# --- tests/meego-ivi-ia32-1.2.80.0.20110502.2.ks (kickstart data, not Python) ---
# -*-mic2-options-*- -f livecd -*-mic2-options-*-
#
# Do not Edit!
Generated by: +# kickstarter.py +# + +lang en_US.UTF-8 +keyboard us +timezone --utc America/Los_Angeles +part / --size 2200 --ondisk sda --fstype=ext3 +rootpw meego +xconfig --startxonboot +bootloader --timeout=0 --append="quiet" +desktop --autologinuser=meego --defaultdesktop=X-IVI --session="/usr/bin/startivi" +user --name meego --groups audio,video --password meego + +repo --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo/builds/1.2.80/1.2.80.0.20110503.2/repos/oss/ia32/packages/ --save --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego +repo --name=non-oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo/builds/1.2.80/1.2.80.0.20110503.2/repos/non-oss/ia32/packages/ --save --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego + +%packages --ignoremissing + +@MeeGo Core +@MeeGo Compliance +@IVI Desktop +@MeeGo X Window System +@MeeGo IVI Applications +@MeeGo Base Development +@X for IVI +@Chinese Support +@Japanese Support +@Korean Support + +kernel-adaptation-intel-automotive + +meego-ux-daemon +meegotouch-compositor +-dsme +-ngfd +-mce +-qmsystem +%end + +%post +# save a little bit of space at least... +rm -f /boot/initrd* + +# Prelink can reduce boot time +if [ -x /usr/sbin/prelink ]; then + /usr/sbin/prelink -aRqm +fi + +rm -f /var/lib/rpm/__db* +rpm --rebuilddb + +# Get rid of /etc/xdg/autostart/applauncherd.desktop line that causes IVI duicontrolpanel issues +# until fixed upstream. BMC#13570 +sed -ri '/OnlyShowIn=X-MEEGO-HS;/d' /etc/xdg/autostart/applauncherd.desktop + + +%end + +%post --nochroot +if [ -n "$IMG_NAME" ]; then + echo "BUILD: $IMG_NAME" >> $INSTALL_ROOT/etc/meego-release +fi + + +%end diff --git a/tests/micng.conf b/tests/micng.conf new file mode 100644 index 0000000..69e77bf --- /dev/null +++ b/tests/micng.conf @@ -0,0 +1,4 @@ +[main] +cache=./cache +outdir=. 
+tmpdir=/var/tmp diff --git a/tools/mic-image-create b/tools/mic-image-create new file mode 100755 index 0000000..49bf4b4 --- /dev/null +++ b/tools/mic-image-create @@ -0,0 +1,67 @@ +#!/usr/bin/python -t + +import sys, os, os.path, string +import micng.utils.argparse as argparse +import micng.configmgr as configmgr +import micng.pluginmgr as pluginmgr + +class Creator(object): + name = 'create' + + def __init__(self): + self.configmgr = configmgr.getConfigMgr() + self.pluginmgr = pluginmgr.PluginMgr() + self.pluginmgr.loadPlugins() + self.plugincmds = self.pluginmgr.getPluginByCateg('imager') + + def main(self, argv=None): +# import pdb +# pdb.set_trace() + if os.getuid() != 0: + print "Please run the program as root" + return 0 + prog = os.path.basename(sys.argv[0]) + parser = argparse.ArgumentParser( + usage='%s [COMMONOPT] <subcommand> [SUBOPT] ARGS' % prog, + ) + parser.add_argument('-k', '--cache', dest='cache', help='cache diretory') + parser.add_argument('-o', '--outdir', dest='outdir', help='output diretory') + parser.add_argument('-t', '--tmpdir', dest='tmpdir', help='temp diretory') + + + subparsers = parser.add_subparsers(title='subcommands') + for subcmd, klass in self.plugincmds: + subcmd_help = 'create ' + subcmd + ' image' + subcmd_parser = subparsers.add_parser( + subcmd, + usage=prog+' [COMMONOPT] '+subcmd+' [SUBOPT] ARGS', + help=subcmd_help + ) + if hasattr(klass, 'do_options'): + add_subopt = getattr(klass, 'do_options') + add_subopt(subcmd_parser) + if hasattr(klass, 'do_create'): + do_create = getattr(klass, 'do_create') + subcmd_parser.set_defaults(func=do_create) + + if not argv: + parser.print_help() + return True + + args = parser.parse_args(argv) + if args.outdir: + self.configmgr.setProperty('outdir', args.outdir) + if args.tmpdir: + self.configmgr.setProperty('tmpdir', args.tmpdir) + if args.cache: + self.configmgr.setProperty('cache', args.cache) +# print 'outdir', self.configmgr.getProperty('outdir') +# print 'tmpdir', 
self.configmgr.getProperty('tmpdir') +# print 'cache', self.configmgr.getProperty('cache') + args.func(args) + return True + +if __name__ == "__main__": + create = Creator() + ret = create.main(sys.argv[1:]) + sys.exit(ret) diff --git a/tools/micng b/tools/micng new file mode 100755 index 0000000..ea98dbd --- /dev/null +++ b/tools/micng @@ -0,0 +1,35 @@ +#!/usr/bin/python -t + +import sys, os +import subprocess +import micng.utils.cmdln as cmdln + +class Mic(cmdln.Cmdln): + def run_subcmd(self, subcmd, opts, args): + creator = "mic-image-create" + tools = { + "cr":creator, "create":creator, + } + + argv = [tools[subcmd]] + argv.extend(args) + subprocess.call(argv) + + @cmdln.alias("cr") + def do_create(self, argv): + """${cmd_name}: create image + + ${cmd_usage} + ${cmd_option_list} + """ + self.run_subcmd("create", None, argv[1:]) + + @cmdln.alias("cv") + def do_convert(self, argv): + """${cmd_name}: convert an image format to another one + """ + +if __name__ == "__main__": + mic = Mic() + ret = mic.main() + sys.exit(ret) diff --git a/tools/micng.ref b/tools/micng.ref new file mode 100755 index 0000000..5d2ad7b --- /dev/null +++ b/tools/micng.ref @@ -0,0 +1,120 @@ +#!/usr/bin/python + +# Copyright (C) 2010 Intel Inc. All rights reserved. +# This program is free software; it may be used, copied, modified +# and distributed under the terms of the GNU General Public Licence, +# either version 2, or version 3 (at your option). + +import sys +import mic3.cmdln as cmdln +import optparse as _optparse + +try: + import mic3.__version__ + VERSION = mic3.__version__.version +except: + VERSION = 'unknown' + +class MIC3(cmdln.Cmdln): + """Usage: mic [GLOBALOPTS] SUBCOMMAND [OPTS] [ARGS...] + or: mic help SUBCOMMAND + + MeeGo Image Tool. + Type 'mic help <subcommand>' for help on a specific subcommand. 
+ + ${command_list} + ${help_list} + global ${option_list} + For additional information, see + * http://www.meego.com/ + """ + + name = 'mic' + version = VERSION + + @cmdln.option("-v", "--verbose", action="store_true", + help="print extra information") + + def get_cmd_help(self, cmdname): + doc = self._get_cmd_handler(cmdname).__doc__ + doc = self._help_reindent(doc) + doc = self._help_preprocess(doc, cmdname) + doc = doc.rstrip() + '\n' # trim down trailing space + return self._str(doc) + + """ create image """ + @cmdln.alias('cr') + @cmdln.option("-c", "--config", type="string", dest="config", + help="Path to kickstart config file") + + @cmdln.option("-f", "--format", type="string", dest="format", + help="Image format, you can specify as fs, livecd, liveusb, loop, raw, nand, mrstnand, ubi, jffs2, vdi or vmdk") + + @cmdln.option("-t", "--tmpdir", type="string", + dest="tmpdir", + help="Temporary directory to use (default: /var/tmp)") + @cmdln.option("-k", "--cache", type="string", + dest="cachedir", default=None, + help="Cache directory to use (default: private cache)") + @cmdln.option("-o", "--outdir", type="string", + dest="outdir", default=None, + help="Output directory to use (default: current work dir)") + @cmdln.option("", "--release", type="string", + dest="release", default=None, + help="Generate a MeeGo release with all necessary files for publishing.") + @cmdln.option("", "--genchecksum", action="store_true", + dest="genchecksum", default=False, + help="Generate checksum for image file if this option is provided") + @cmdln.option("-P", "--prefix", type="string", + dest="prefix", default=None, + help="Image name prefix (default: meego)") + @cmdln.option("-S", "--suffix", type="string", + dest="suffix", default=None, + help="Image name suffix (default: date stamp)") + @cmdln.option("-a", "--arch", type="string", + dest="arch", default=None, + help="Specify target arch of image, for example: arm") + @cmdln.option("", "--use-comps", action="store_true", + 
dest="use_comps", default=False, + help="Use comps instead of patterns if comps exists") + @cmdln.option("", "--record-pkgs", type="string", + dest="record_pkgs", default=None, + help="Record the installed packages, valid values: name, content") + @cmdln.option("", "--fstype", type="string", + dest="fstype", default="vfat", + help="File system type for live USB file image, ext3 or vfat, the default is vfat.") + @cmdln.option("", "--overlay-size-mb", type="int", default=64, + help="Overlay size in MB as unit, it means how size changes you can save in your live USB disk.") + @cmdln.option('-d', '--debug', action='store_true', + help='Output debugging information') + @cmdln.option('-v', '--verbose', dest='verbose', action='store_true', + help='Output verbose information') + @cmdln.option('', '--logfile', type="string", dest="file", + help='Save debug information to FILE') + @cmdln.option("", "--save-kernel", action="store_true", + dest="save_kernel", default=False, + help="Save kernel image file into outdir") + @cmdln.option("", "--pkgmgr", type="string", + help="Specify the package manager, the available package managers have zypper and yum currently.") + @cmdln.option("", "--volumeid", type="string", default=None, + help="Specify volume id, valid only for livecd") + def do_create(self, subcmd, opts, *args): + """${cmd_name}: Create an image + + This command is used to create various images, including + live CD, live USB, loop, raw/KVM/QEMU, VMWare/vmdk, + VirtualBox/vdi, Moorestown/mrstnand, jffs2 and ubi. + + Examples: + mic create # create an image according to the default config + mic create --format=liveusb # create a live USB image + + ${cmd_usage} + ${cmd_option_list} + """ + + print subcmd, opts, args + +if __name__ == "__main__": + mic = MIC3() + sys.exit(mic.main(sys.argv)) |