summaryrefslogtreecommitdiff
path: root/examples
diff options
context:
space:
mode:
authorMichael Schroeder <mls@suse.de>2011-03-03 16:46:14 +0100
committerMichael Schroeder <mls@suse.de>2011-03-03 16:46:14 +0100
commitca0bde44286e2363a1984b9bed957b6f5e846d79 (patch)
treec52ed63df7873b2d5091873e8c891df91fb95aa7 /examples
parentf7dc6ef9777348a23f0f8d2795a508467e4f978d (diff)
downloadlibsolv-ca0bde44286e2363a1984b9bed957b6f5e846d79.tar.gz
libsolv-ca0bde44286e2363a1984b9bed957b6f5e846d79.tar.bz2
libsolv-ca0bde44286e2363a1984b9bed957b6f5e846d79.zip
- rename pysolv.py to pysolv, commit latest versions
Diffstat (limited to 'examples')
-rwxr-xr-xexamples/pysolv1118
-rw-r--r--examples/pysolv.py976
-rw-r--r--examples/solv.c235
-rw-r--r--examples/solv.i94
4 files changed, 1306 insertions, 1117 deletions
diff --git a/examples/pysolv b/examples/pysolv
new file mode 100755
index 0000000..1ebf931
--- /dev/null
+++ b/examples/pysolv
@@ -0,0 +1,1118 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2011, Novell Inc.
+#
+# This program is licensed under the BSD license, read LICENSE.BSD
+# for further information
+#
+
+# pysolv a little software installer demoing the sat solver library/bindings
+
+# things it does:
+# - understands globs for package names / dependencies
+# - understands .arch suffix
+# - repository data caching
+# - on demand loading of secondary repository data
+# - checksum verification
+# - deltarpm support
+# - installation of commandline packages
+#
+# things not yet ported:
+# - gpg verification
+# - file conflicts
+# - fastestmirror implementation
+#
+# things available in the library but missing from pysolv:
+# - vendor policy loading
+# - soft locks file handling
+# - multi version handling
+
+import sys
+import os
+import glob
+import solv
+import re
+import tempfile
+import time
+import subprocess
+import fnmatch
+import rpm
+from stat import *
+from solv import Pool, Repo, Dataiterator, Job, Solver, Transaction
+from iniparse import INIConfig
+from optparse import OptionParser
+
+#import gc
+#gc.set_debug(gc.DEBUG_LEAK)
+
+def calc_cookie_file(filename):
+    # Fingerprint a file by hashing a version tag plus its stat() data
+    # (no content is read); used to detect rpmdb changes cheaply.
+    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
+    chksum.add("1.1")
+    chksum.add_stat(filename)
+    return chksum.raw()
+
+def calc_cookie_fp(fp):
+    # Fingerprint an open file by hashing its full contents (SHA-256).
+    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
+    chksum.add_fp(fp)
+    return chksum.raw()
+
+class generic_repo(dict):
+    # Base class for all repo types. Behaves as a dict holding the .repo
+    # config attributes plus runtime keys ('handle', 'cookie', 'extcookie',
+    # 'incomplete', ...). Subclasses implement load_if_changed()/load_ext().
+    def __init__(self, attribs):
+        for k in attribs:
+            self[k] = attribs[k]
+
+    def cachepath(self, ext = None):
+        # Map the repo alias (plus optional extension tag) to a cache file
+        # under /var/cache/solv, sanitizing a leading '.' and any '/'.
+        path = re.sub(r'^\.', '_', self['alias'])
+        if ext:
+            path += "_" + ext + ".solvx"
+        else:
+            path += ".solv"
+        return "/var/cache/solv/" + re.sub(r'[/]', '_', path)
+
+    def load(self, pool):
+        # Create the pool repo and use the cached solv file if it is still
+        # fresh; otherwise defer to the subclass's load_if_changed().
+        # NOTE(review): the next three lines reference the module-global
+        # 'repo' instead of 'self'; they only work because the caller's
+        # loop variable happens to be this object — confirm intent.
+        self['handle'] = pool.add_repo(repo['alias'])
+        self['handle'].appdata = repo
+        self['handle'].priority = 99 - repo['priority']
+        if self['autorefresh']:
+            dorefresh = True
+        # NOTE(review): 'dorefresh' is unbound (NameError) when
+        # autorefresh is falsy — verify against upstream.
+        if dorefresh:
+            try:
+                st = os.stat(self.cachepath())
+                if time.time() - st[ST_MTIME] < self['metadata_expire']:
+                    dorefresh = False
+            except OSError, e:
+                pass
+        self['cookie'] = ''
+        if not dorefresh and self.usecachedrepo(None):
+            print "repo: '%s': cached" % self['alias']
+            return True
+        return self.load_if_changed()
+
+    def load_ext(repodata):
+        # Default stub loader: nothing to load.
+        # NOTE(review): missing 'self' parameter — 'repodata' receives the
+        # instance when called as a method; harmless only because the
+        # argument is ignored here.
+        return False
+
+    def setfromurls(self, urls):
+        # Adopt the first mirror URL as the repo baseurl.
+        if not urls:
+            return
+        url = urls[0]
+        print "[using mirror %s]" % re.sub(r'^(.*?/...*?)/.*$', r'\1', url)
+        self['baseurl'] = url
+
+    def setfrommetalink(self, metalink):
+        # Download a metalink file, collect mirror URLs, and return the
+        # advertised sha256 checksum of repomd.xml (or None).
+        nf = self.download(metalink, False, None)
+        if not nf:
+            return None
+        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
+        solv.xfclose(nf)
+        urls = []
+        chksum = None
+        for l in f.readlines():
+            l = l.strip()
+            m = re.match(r'^<hash type="sha256">([0-9a-fA-F]{64})</hash>', l)
+            if m:
+                chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256, m.group(1))
+            m = re.match(r'^<url.*>(https?://.+)repodata/repomd.xml</url>', l)
+            if m:
+                urls.append(m.group(1))
+        if not urls:
+            chksum = None # in case the metalink is about a different file
+        f.close()
+        self.setfromurls(urls)
+        return chksum
+
+    def setfrommirrorlist(self, mirrorlist):
+        # Download a plain mirrorlist (one URL per line) into baseurl.
+        nf = self.download(mirrorlist, False, None)
+        if not nf:
+            return
+        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
+        solv.xfclose(nf)
+        urls = []
+        # NOTE(review): f.readline() iterates the *characters* of the first
+        # line (readlines() was probably intended), and the slices are one
+        # char short of 'http://' (7) / 'https://' (8), so neither prefix
+        # test can ever match — verify against upstream fix.
+        for l in f.readline():
+            l = l.strip()
+            if l[0:6] == 'http://' or l[0:7] == 'https://':
+                urls.append(l)
+        self.setfromurls(urls)
+        f.close()
+
+    def download(self, file, uncompress, chksum, markincomplete=False):
+        # Fetch 'file' relative to the repo baseurl with curl, resolving
+        # baseurl via metalink/mirrorlist first if needed. Verifies the
+        # checksum when given; returns an xfopen'd temp file or None.
+        url = None
+        if 'baseurl' not in self:
+            if 'metalink' in self:
+                if file != self['metalink']:
+                    metalinkchksum = self.setfrommetalink(self['metalink'])
+                    if file == 'repodata/repomd.xml' and metalinkchksum and not chksum:
+                        chksum = metalinkchksum
+                else:
+                    url = file
+            elif 'mirrorlist' in self:
+                if file != self['mirrorlist']:
+                    self.setfrommirrorlist(self['mirrorlist'])
+                else:
+                    url = file
+        if not url:
+            if 'baseurl' not in self:
+                print "%s: no baseurl" % self['alias']
+                return None
+            url = re.sub(r'/$', '', self['baseurl']) + '/' + file
+        f = tempfile.TemporaryFile()
+        st = subprocess.call(['curl', '-f', '-s', '-L', url], stdout=f.fileno())
+        # empty download: treated as "file absent" unless curl failed while
+        # a checksum was expected (then the error path below reports it)
+        if os.lseek(f.fileno(), 0, os.SEEK_CUR) == 0 and (st == 0 or not chksum):
+            return None
+        os.lseek(f.fileno(), 0, os.SEEK_SET)
+        if st:
+            print "%s: download error %d" % (file, st)
+            if markincomplete:
+                self['incomplete'] = True
+            return None
+        if chksum:
+            fchksum = solv.Chksum(chksum.type)
+            if not fchksum:
+                print "%s: unknown checksum type" % file
+                if markincomplete:
+                    self['incomplete'] = True
+                return None
+            fchksum.add_fd(f.fileno())
+            if not fchksum.matches(chksum):
+                print "%s: checksum mismatch" % file
+                if markincomplete:
+                    self['incomplete'] = True
+                return None
+        if uncompress:
+            return solv.xfopen_fd(file, os.dup(f.fileno()))
+        return solv.xfopen_fd("", os.dup(f.fileno()))
+
+    def usecachedrepo(self, ext, mark=False):
+        # Try to load the cached .solv/.solvx file. The file carries a
+        # trailing 32-byte cookie (and, for non-system main repos, an
+        # extension cookie before it) that must match the live cookie.
+        # Returns True when the cache was used; 'mark' touches the mtime.
+        if not ext:
+            cookie = self['cookie']
+        else:
+            cookie = self['extcookie']
+        handle = self['handle']
+        try:
+            repopath = self.cachepath(ext)
+            f = open(repopath, 'r')
+            f.seek(-32, os.SEEK_END)
+            fcookie = f.read(32)
+            if len(fcookie) != 32:
+                return False
+            if cookie and fcookie != cookie:
+                return False
+            if self['alias'] != '@System' and not ext:
+                f.seek(-32 * 2, os.SEEK_END)
+                fextcookie = f.read(32)
+                if len(fextcookie) != 32:
+                    return False
+            f.seek(0)
+            flags = 0
+            if ext:
+                flags = Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES
+                # deltainfo is pool-global data, everything else can use
+                # a repo-local string pool
+                if ext != 'DL':
+                    flags |= Repo.REPO_LOCALPOOL
+            if not self['handle'].add_solv(f, flags):
+                return False
+            if self['alias'] != '@System' and not ext:
+                self['cookie'] = fcookie
+                self['extcookie'] = fextcookie
+            if mark:
+                # no futimes in python?
+                try:
+                    os.utime(repopath, None)
+                except Exception, e:
+                    pass
+        except IOError, e:
+            return False
+        return True
+
+    def genextcookie(self, f):
+        # Derive the extension cookie from the main cookie plus the cache
+        # file's stat data, mirroring the C example's scheme.
+        chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
+        chksum.add(self['cookie'])
+        if f:
+            stat = os.fstat(f.fileno())
+            chksum.add(str(stat[ST_DEV]))
+            chksum.add(str(stat[ST_INO]))
+            chksum.add(str(stat[ST_SIZE]))
+            chksum.add(str(stat[ST_MTIME]))
+        extcookie = chksum.raw()
+        # compatibility to c code
+        # NOTE(review): if raw() returns a str this item assignment raises
+        # TypeError (str is immutable) whenever byte 0 is NUL — confirm.
+        if ord(extcookie[0]) == 0:
+            extcookie[0] = chr(1)
+        self['extcookie'] = extcookie
+
+    def writecachedrepo(self, ext, info=None):
+        # Atomically write the repo (or one extension repodata) to the
+        # cache dir, append the cookie trailer, then re-open the written
+        # file so the pool can page data from disk instead of memory.
+        try:
+            if not os.path.isdir("/var/cache/solv"):
+                os.mkdir("/var/cache/solv", 0755)
+            # NOTE(review): if either line above raises IOError, 'tmpname'
+            # is unbound in the except handler below (NameError).
+            (fd, tmpname) = tempfile.mkstemp(prefix='.newsolv-', dir='/var/cache/solv')
+            os.fchmod(fd, 0444)
+            f = os.fdopen(fd, 'w+')
+            if not info:
+                self['handle'].write(f)
+            elif ext:
+                info.write(f)
+            else: # rewrite_repos case
+                self['handle'].write_first_repodata(f)
+            if self['alias'] != '@System' and not ext:
+                if 'extcookie' not in self:
+                    self.genextcookie(f)
+                f.write(self['extcookie'])
+            if not ext:
+                f.write(self['cookie'])
+            else:
+                f.write(self['extcookie'])
+            f.close()
+            if self['handle'].iscontiguous():
+                # switch to saved repo to activate paging and save memory
+                nf = solv.xfopen(tmpname)
+                if not ext:
+                    # main repo
+                    self['handle'].empty()
+                    if not self['handle'].add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
+                        sys.exit("internal error, cannot reload solv file")
+                else:
+                    # extension repodata
+                    # need to extend to repo boundaries, as this is how
+                    # info.write() has written the data
+                    info.extend_to_repo()
+                    # LOCALPOOL does not help as pool already contains all ids
+                    info.read_solv_flags(nf, Repo.REPO_EXTEND_SOLVABLES)
+                solv.xfclose(nf)
+            os.rename(tmpname, self.cachepath(ext))
+        except IOError, e:
+            if tmpname:
+                os.unlink(tmpname)
+
+    def updateaddedprovides(self, addedprovides):
+        # Merge newly computed file-provides ids into the repo's first
+        # repodata and rewrite the cache if anything was missing.
+        if 'incomplete' in self:
+            return
+        if 'handle' not in self:
+            return
+        if not self['handle'].nsolvables:
+            return
+        # make sure there's just one real repodata with extensions
+        repodata = self['handle'].first_repodata()
+        if not repodata:
+            return
+        oldaddedprovides = repodata.lookup_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES)
+        if not set(addedprovides) <= set(oldaddedprovides):
+            for id in addedprovides:
+                repodata.add_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES, id)
+            repodata.internalize()
+            self.writecachedrepo(None, repodata)
+
+class repomd_repo(generic_repo):
+    # rpm-md (repodata/) repository: parses repomd.xml, loads primary and
+    # updateinfo, and registers filelists/deltainfo as on-demand stubs.
+    def load_if_changed(self):
+        print "rpmmd repo '%s':" % self['alias'],
+        sys.stdout.flush()
+        f = self.download("repodata/repomd.xml", False, None, None)
+        if not f:
+            print "no repomd.xml file, skipped"
+            self['handle'].free(True)
+            del self['handle']
+            return False
+        # a changed repomd.xml changes the cookie and invalidates the cache
+        self['cookie'] = calc_cookie_fp(f)
+        if self.usecachedrepo(None, True):
+            print "cached"
+            solv.xfclose(f)
+            return True
+        self['handle'].add_repomdxml(f, 0)
+        solv.xfclose(f)
+        print "fetching"
+        (filename, filechksum) = self.find('primary')
+        if filename:
+            f = self.download(filename, True, filechksum, True)
+            if f:
+                self['handle'].add_rpmmd(f, None, 0)
+                solv.xfclose(f)
+            if 'incomplete' in self:
+                return False # hopeless, need good primary
+        (filename, filechksum) = self.find('updateinfo')
+        if filename:
+            f = self.download(filename, True, filechksum, True)
+            if f:
+                self['handle'].add_updateinfoxml(f, 0)
+                solv.xfclose(f)
+        self.add_exts()
+        if 'incomplete' not in self:
+            self.writecachedrepo(None)
+        # must be called after writing the repo
+        self['handle'].create_stubs()
+        return True
+
+    def find(self, what):
+        # Look up a data file entry of type 'what' in the parsed
+        # repomd.xml; returns (location, checksum) or (None, None).
+        # Entries without a checksum are rejected.
+        di = self['handle'].dataiterator_new(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE, what, Dataiterator.SEARCH_STRING)
+        di.prepend_keyname(solv.REPOSITORY_REPOMD)
+        for d in di:
+            d.setpos_parent()
+            filename = d.pool.lookup_str(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_LOCATION)
+            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_CHECKSUM)
+            if filename and not chksum:
+                print "no %s file checksum!" % filename
+                filename = None
+                chksum = None
+            if filename:
+                return (filename, chksum)
+        return (None, None)
+
+    def add_ext(self, repodata, what, ext):
+        # Register one extension (deltainfo/filelists) as a stub entry so
+        # load_ext can fetch it on demand; declares the keys it provides.
+        filename, chksum = self.find(what)
+        if not filename and what == 'deltainfo':
+            # fedora calls the deltainfo file 'prestodelta'
+            filename, chksum = self.find('prestodelta')
+        if not filename:
+            return
+        handle = repodata.new_handle()
+        repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
+        repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
+        repodata.set_bin_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksum)
+        if what == 'deltainfo':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
+        elif what == 'filelists':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
+        repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
+
+    def add_exts(self):
+        # Add one stub repodata holding all extension entries.
+        repodata = self['handle'].add_repodata(0)
+        self.add_ext(repodata, 'deltainfo', 'DL')
+        self.add_ext(repodata, 'filelists', 'FL')
+        repodata.internalize()
+
+    def load_ext(self, repodata):
+        # Stub load callback: fetch and parse filelists ('FL') or
+        # deltainfo ('DL'), preferring the extension cache when valid.
+        repomdtype = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE)
+        if repomdtype == 'filelists':
+            ext = 'FL'
+        elif repomdtype == 'deltainfo':
+            ext = 'DL'
+        else:
+            return False
+        sys.stdout.write("[%s:%s" % (self['alias'], ext))
+        if self.usecachedrepo(ext):
+            sys.stdout.write(" cached]\n")
+            sys.stdout.flush()
+            return True
+        sys.stdout.write(" fetching]\n")
+        sys.stdout.flush()
+        filename = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_LOCATION)
+        filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.REPOSITORY_REPOMD_CHECKSUM)
+        f = self.download(filename, True, filechksum)
+        if not f:
+            return False
+        if ext == 'FL':
+            self['handle'].add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
+        elif ext == 'DL':
+            self['handle'].add_deltainfoxml(f, Repo.REPO_USE_LOADING)
+        solv.xfclose(f)
+        self.writecachedrepo(ext, repodata)
+        return True
+
+class susetags_repo(generic_repo):
+    # SUSE tags (yast2) repository: a 'content' index file plus
+    # packages[.*] files under the descr directory.
+    def load_if_changed(self):
+        print "susetags repo '%s':" % self['alias'],
+        sys.stdout.flush()
+        f = self.download("content", False, None, None)
+        if not f:
+            print "no content file, skipped"
+            self['handle'].free(True)
+            del self['handle']
+            return False
+        # a changed content file invalidates the cache (cookie mismatch)
+        self['cookie'] = calc_cookie_fp(f)
+        if self.usecachedrepo(None, True):
+            print "cached"
+            solv.xfclose(f)
+            return True
+        self['handle'].add_content(f, 0)
+        solv.xfclose(f)
+        print "fetching"
+        defvendorid = self['handle'].lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
+        descrdir = self['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
+        if not descrdir:
+            descrdir = "suse/setup/descr"
+        (filename, filechksum) = self.find('packages.gz')
+        if not filename:
+            (filename, filechksum) = self.find('packages')
+        if filename:
+            f = self.download(descrdir + '/' + filename, True, filechksum, True)
+            if f:
+                self['handle'].add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.SUSETAGS_RECORD_SHARES)
+                solv.xfclose(f)
+        (filename, filechksum) = self.find('packages.en.gz')
+        if not filename:
+            (filename, filechksum) = self.find('packages.en')
+        if filename:
+            f = self.download(descrdir + '/' + filename, True, filechksum, True)
+            if f:
+                self['handle'].add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.REPO_REUSE_REPODATA|Repo.REPO_EXTEND_SOLVABLES)
+                solv.xfclose(f)
+        self['handle'].internalize()
+        self.add_exts()
+        if 'incomplete' not in self:
+            self.writecachedrepo(None)
+        # must be called after writing the repo
+        self['handle'].create_stubs()
+        return True
+
+    def find(self, what):
+        # Check whether file 'what' is listed in the content file's file
+        # section; returns (what, checksum) on the first match.
+        di = self['handle'].dataiterator_new(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
+        di.prepend_keyname(solv.SUSETAGS_FILE)
+        for d in di:
+            d.setpos_parent()
+            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.SUSETAGS_FILE_CHECKSUM)
+            return (what, chksum)
+        return (None, None)
+
+    def add_ext(self, repodata, what, ext):
+        # Register one packages.<ext> file as an on-demand stub; 'DU'
+        # carries disk usage, 'FL' file lists, anything else is treated
+        # as a translation language code.
+        (filename, chksum) = self.find(what)
+        if not filename:
+            return
+        handle = repodata.new_handle()
+        repodata.set_str(handle, solv.SUSETAGS_FILE_NAME, filename)
+        if chksum:
+            repodata.set_bin_checksum(handle, solv.SUSETAGS_FILE_CHECKSUM, chksum)
+        if ext == 'DU':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
+        elif ext == 'FL':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
+        else:
+            for langtag, langtagtype in [
+                (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
+            ]:
+                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, self['handle'].pool.id2langid(langtag, ext, 1))
+                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
+        repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
+
+    def add_exts(self):
+        # Scan the content file list for packages.XX entries and register
+        # each two-letter extension (the 'en' base translation is loaded
+        # eagerly, so it is skipped here).
+        repodata = self['handle'].add_repodata(0)
+        di = self['handle'].dataiterator_new(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, None, 0)
+        di.prepend_keyname(solv.SUSETAGS_FILE)
+        for d in di:
+            filename = d.match_str()
+            if not filename:
+                continue
+            if filename[0:9] != "packages.":
+                continue
+            if len(filename) == 11 and filename != "packages.gz":
+                ext = filename[9:11]
+            elif filename[11:12] == ".":
+                ext = filename[9:11]
+            else:
+                continue
+            if ext == "en":
+                continue
+            self.add_ext(repodata, filename, ext)
+        repodata.internalize()
+
+    def load_ext(self, repodata):
+        # Stub load callback: the extension code is chars 9-11 of the
+        # file name (e.g. packages.DU -> 'DU').
+        filename = repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME)
+        ext = filename[9:11]
+        sys.stdout.write("[%s:%s" % (self['alias'], ext))
+        if self.usecachedrepo(ext):
+            sys.stdout.write(" cached]\n")
+            sys.stdout.flush()
+            return True
+        sys.stdout.write(" fetching]\n")
+        sys.stdout.flush()
+        defvendorid = self['handle'].lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
+        descrdir = self['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
+        if not descrdir:
+            descrdir = "suse/setup/descr"
+        filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.SUSETAGS_FILE_CHECKSUM)
+        f = self.download(descrdir + '/' + filename, True, filechksum)
+        if not f:
+            return False
+        self['handle'].add_susetags(f, defvendorid, None, Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
+        solv.xfclose(f)
+        self.writecachedrepo(ext, repodata)
+        return True
+
+class unknown_repo(generic_repo):
+    # Placeholder for unsupported repo types: never loads anything.
+    def load(self, pool):
+        print "unsupported repo '%s': skipped" % self['alias']
+        return False
+
+class system_repo(generic_repo):
+    # Pseudo repo representing the installed rpm database; registered as
+    # the pool's 'installed' repo.
+    def load(self, pool):
+        self['handle'] = pool.add_repo(self['alias'])
+        self['handle'].appdata = self
+        pool.installed = self['handle']
+        print "rpm database:",
+        # cookie derives from the rpmdb Packages file's stat data, so any
+        # rpmdb change invalidates the cache
+        self['cookie'] = calc_cookie_file("/var/lib/rpm/Packages")
+        if self.usecachedrepo(None):
+            print "cached"
+            return True
+        print "reading"
+        self['handle'].add_products("/etc/products.d", Repo.REPO_NO_INTERNALIZE)
+        self['handle'].add_rpmdb(None)
+        self.writecachedrepo(None)
+        return True
+
+class cmdline_repo(generic_repo):
+    # Transient repo holding .rpm files given on the command line.
+    def load(self, pool):
+        self['handle'] = pool.add_repo(self['alias'])
+        self['handle'].appdata = self
+        return True
+
+def validarch(pool, arch):
+    # True if 'arch' names an architecture known to the pool.
+    if not arch:
+        return False
+    id = pool.str2id(arch, False)
+    if not id:
+        return False
+    return pool.isknownarch(id)
+
+def limitjobs(pool, jobs, flags, evr):
+    # Narrow each job by relating its 'what' id to evr via a REL id,
+    # adding the matching SET* flags so the solver treats the filter as
+    # user-specified.
+    njobs = []
+    for j in jobs:
+        how = j.how
+        sel = how & Job.SOLVER_SELECTMASK
+        what = pool.rel2id(j.what, evr, flags)
+        if flags == solv.REL_ARCH:
+            how |= Job.SOLVER_SETARCH
+        if flags == solv.REL_EQ and sel == Job.SOLVER_SOLVABLE_NAME:
+            # a '-' in the evr string means a release is included
+            if pool.id2str(evr).find('-') >= 0:
+                how |= Job.SOLVER_SETEVR
+            else:
+                how |= Job.SOLVER_SETEV
+        njobs.append(pool.Job(how, what))
+    return njobs
+
+def limitjobs_arch(pool, jobs, flags, evr):
+    # Like limitjobs, but also honors a trailing '.arch' suffix in evr.
+    m = re.match(r'(.+)\.(.+?)$', evr)
+    if m and validarch(pool, m.group(2)):
+        jobs = limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
+        return limitjobs(pool, jobs, flags, pool.str2id(m.group(1)))
+    else:
+        return limitjobs(pool, jobs, flags, pool.str2id(evr))
+
+def mkjobs_filelist(pool, cmd, arg):
+    # Build jobs from an absolute file path argument by searching repo
+    # file lists; erase commands search only the installed repo.
+    if re.search(r'[[*?]', arg):
+        type = Dataiterator.SEARCH_GLOB
+    else:
+        type = Dataiterator.SEARCH_STRING
+    if cmd == 'rm' or cmd == 'erase':
+        di = pool.installed.dataiterator_new(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
+    else:
+        di = pool.dataiterator_new(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
+    matches = []
+    for d in di:
+        s = d.solvable
+        if s and s.installable():
+            matches.append(s.id)
+            di.skip_solvable() # one match is enough
+    if matches:
+        print "[using file list match for '%s']" % arg
+        if len(matches) > 1:
+            return [ pool.Job(Job.SOLVER_SOLVABLE_ONE_OF, pool.towhatprovides(matches)) ]
+        else:
+            return [ pool.Job(Job.SOLVER_SOLVABLE | Job.SOLVER_NOAUTOSET, matches[0]) ]
+    return []
+
+def mkjobs_rel(pool, cmd, name, rel, evr):
+    # Build jobs for a relational argument 'name <op> evr'; falls back to
+    # stripping a '.arch' suffix from the name.
+    flags = 0
+    if rel.find('<') >= 0: flags |= solv.REL_LT
+    if rel.find('=') >= 0: flags |= solv.REL_EQ
+    if rel.find('>') >= 0: flags |= solv.REL_GT
+    jobs = depglob(pool, name, True, True)
+    if jobs:
+        return limitjobs(pool, jobs, flags, pool.str2id(evr))
+    m = re.match(r'(.+)\.(.+?)$', name)
+    if m and validarch(pool, m.group(2)):
+        jobs = depglob(pool, m.group(1), True, True)
+        if jobs:
+            jobs = limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
+            return limitjobs(pool, jobs, flags, pool.str2id(evr))
+    return []
+
+def mkjobs_nevra(pool, cmd, arg):
+    # Interpret the argument as, in order: plain name/dep glob, name.arch,
+    # name-version, then name-version-release.
+    jobs = depglob(pool, arg, True, True)
+    if jobs:
+        return jobs
+    m = re.match(r'(.+)\.(.+?)$', arg)
+    if m and validarch(pool, m.group(2)):
+        jobs = depglob(pool, m.group(1), True, True)
+        if jobs:
+            return limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
+    m = re.match(r'(.+)-(.+?)$', arg)
+    if m:
+        jobs = depglob(pool, m.group(1), True, False)
+        if jobs:
+            return limitjobs_arch(pool, jobs, solv.REL_EQ, m.group(2))
+    m = re.match(r'(.+)-(.+?-.+?)$', arg)
+    if m:
+        jobs = depglob(pool, m.group(1), True, False)
+        if jobs:
+            return limitjobs_arch(pool, jobs, solv.REL_EQ, m.group(2))
+    return []
+
+def mkjobs(pool, cmd, arg):
+    # Dispatch an argument to file-list, relational, or nevra job parsing.
+    if len(arg) and arg[0] == '/':
+        jobs = mkjobs_filelist(pool, cmd, arg)
+        if jobs:
+            return jobs
+    m = re.match(r'(.+?)\s*([<=>]+)\s*(.+?)$', arg)
+    if m:
+        return mkjobs_rel(pool, cmd, m.group(1), m.group(2), m.group(3))
+    else:
+        return mkjobs_nevra(pool, cmd, arg)
+
+def depglob(pool, name, globname, globdep):
+    # Resolve a (possibly glob) name to jobs: exact package name first,
+    # then capability providers, then fnmatch over solvable names and
+    # over all providing dependency ids.
+    id = pool.str2id(name, False)
+    if id:
+        match = False
+        for s in pool.providers(id):
+            if globname and s.nameid == id:
+                return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) ]
+            match = True
+        if match:
+            if globname and globdep:
+                print "[using capability match for '%s']" % name
+            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) ]
+    if not re.search(r'[[*?]', name):
+        return []
+    if globname:
+        # try name glob
+        idmatches = {}
+        for s in pool.solvables:
+            if s.installable() and fnmatch.fnmatch(s.name, name):
+                idmatches[s.nameid] = True
+        if idmatches:
+            return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) for id in sorted(idmatches.keys()) ]
+    if globdep:
+        # try dependency glob
+        idmatches = {}
+        for id in pool.allprovidingids():
+            if fnmatch.fnmatch(pool.id2str(id), name):
+                idmatches[id] = True
+        if idmatches:
+            print "[using capability match for '%s']" % name
+            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches.keys()) ]
+    return []
+
+
+def load_stub(repodata):
+    # Pool load callback: delegate stub loading to the owning repo object
+    # stored in the repo's appdata.
+    repo = repodata.repo.appdata
+    if repo:
+        return repo.load_ext(repodata)
+    return False
+
+
+parser = OptionParser(usage="usage: solv.py [options] COMMAND")
+(options, args) = parser.parse_args()
+if not args:
+    parser.print_help(sys.stderr)
+    sys.exit(1)
+
+cmd = args[0]
+args = args[1:]
+# expand two-letter command abbreviations
+if cmd == 'li':
+    cmd = 'list'
+if cmd == 'in':
+    cmd = 'install'
+if cmd == 'rm':
+    cmd = 'erase'
+if cmd == 've':
+    cmd = 'verify'
+if cmd == 'se':
+    cmd = 'search'
+
+
+pool = solv.Pool()
+pool.setarch(os.uname()[4])
+pool.set_loadcallback(load_stub)
+
+# read all repo configs
+repos = []
+for reposdir in ["/etc/zypp/repos.d"]:
+    if not os.path.isdir(reposdir):
+        continue
+    for reponame in sorted(glob.glob('%s/*.repo' % reposdir)):
+        cfg = INIConfig(open(reponame))
+        for alias in cfg:
+            repoattr = {'alias': alias, 'enabled': 0, 'priority': 99, 'autorefresh': 1, 'type': 'rpm-md', 'metadata_expire': 900}
+            for k in cfg[alias]:
+                repoattr[k] = cfg[alias][k]
+            if 'mirrorlist' in repoattr and 'metalink' not in repoattr:
+                # NOTE(review): str.find() returns -1 (truthy) when absent
+                # and 0 (falsy) for a match at position 0 — '>= 0' was
+                # probably intended here; verify against upstream.
+                if repoattr['mirrorlist'].find('/metalink'):
+                    repoattr['metalink'] = repoattr['mirrorlist']
+                    del repoattr['mirrorlist']
+            if repoattr['type'] == 'rpm-md':
+                repo = repomd_repo(repoattr)
+            elif repoattr['type'] == 'yast2':
+                repo = susetags_repo(repoattr)
+            else:
+                repo = unknown_repo(repoattr)
+            repos.append(repo)
+
+# now load all enabled repos into the pool
+sysrepo = system_repo({ 'alias': '@System', 'type': 'system' })
+sysrepo.load(pool)
+for repo in repos:
+    if int(repo['enabled']):
+        repo.load(pool)
+
+if cmd == 'search':
+    # case-insensitive substring search over package names
+    matches = {}
+    di = pool.dataiterator_new(0, solv.SOLVABLE_NAME, args[0], Dataiterator.SEARCH_SUBSTRING|Dataiterator.SEARCH_NOCASE)
+    for d in di:
+        matches[d.solvid] = True
+    for solvid in sorted(matches.keys()):
+        print " - %s [%s]: %s" % (pool.solvid2str(solvid), pool.solvables[solvid].repo.name, pool.lookup_str(solvid, solv.SOLVABLE_SUMMARY))
+    sys.exit(0)
+
+cmdlinerepo = None
+if cmd == 'list' or cmd == 'info' or cmd == 'install':
+    # local .rpm arguments go into a transient @commandline repo
+    for arg in args:
+        if arg.endswith(".rpm") and os.access(arg, os.R_OK):
+            if not cmdlinerepo:
+                cmdlinerepo = cmdline_repo({ 'alias': '@commandline', 'type': 'commandline' })
+                cmdlinerepo.load(pool)
+                cmdlinerepo['packages'] = {}
+            cmdlinerepo['packages'][arg] = cmdlinerepo['handle'].add_rpm(arg, Repo.REPO_REUSE_REPODATA|Repo.REPO_NO_INTERNALIZE)
+    if cmdlinerepo:
+        cmdlinerepo['handle'].internalize()
+
+# distribute newly found file provides to all repos' caches
+addedprovides = pool.addfileprovides_ids()
+if addedprovides:
+    sysrepo.updateaddedprovides(addedprovides)
+    for repo in repos:
+        repo.updateaddedprovides(addedprovides)
+
+pool.createwhatprovides()
+
+# convert arguments into jobs
+jobs = []
+for arg in args:
+    if cmdlinerepo and arg in cmdlinerepo['packages']:
+        jobs.append(pool.Job(Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg]))
+    else:
+        jobs += mkjobs(pool, cmd, arg)
+
+if cmd == 'list' or cmd == 'info':
+    if not jobs:
+        print "no package matched."
+        sys.exit(1)
+    for job in jobs:
+        for s in pool.jobsolvables(job):
+            if cmd == 'info':
+                print "Name: %s" % s.str()
+                print "Repo: %s" % s.repo.name
+                print "Summary: %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
+                # NOTE(review): 'str' shadows the builtin from here on
+                str = s.lookup_str(solv.SOLVABLE_URL)
+                if str:
+                    print "Url: %s" % str
+                str = s.lookup_str(solv.SOLVABLE_LICENSE)
+                if str:
+                    print "License: %s" % str
+                print "Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION)
+                print
+            else:
+                print " - %s [%s]" % (s.str(), s.repo.name)
+                print " %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
+    sys.exit(0)
+
+if cmd == 'install' or cmd == 'erase' or cmd == 'up' or cmd == 'dup' or cmd == 'verify':
+ if not jobs:
+ if cmd == 'up' or cmd == 'verify':
+ jobs = [ pool.Job(Job.SOLVER_SOLVABLE_ALL, 0) ]
+ elif cmd == 'dup':
+ pass
+ else:
+ print "no package matched."
+ sys.exit(1)
+ for job in jobs:
+ if cmd == 'up':
+ # up magic: use install instead of update if no installed package matches
+ if job.how == Job.SOLVER_SOLVABLE_ALL or filter(lambda s: s.isinstalled(), pool.jobsolvables(job)):
+ job.how |= Job.SOLVER_UPDATE
+ else:
+ job.how |= Job.SOLVER_INSTALL
+ elif cmd == 'install':
+ job.how |= Job.SOLVER_INSTALL
+ elif cmd == 'erase':
+ job.how |= Job.SOLVER_ERASE
+ elif cmd == 'dup':
+ job.how |= Job.SOLVER_DISTUPGRADE
+ elif cmd == 'verify':
+ job.how |= Job.SOLVER_VERIFY
+
+ #pool.set_debuglevel(2)
+ solver = None
+ while True:
+ solver = pool.create_solver()
+ solver.ignorealreadyrecommended = True
+ if cmd == 'erase':
+ solver.allowuninstall = True
+ if cmd == 'dup' and not jobs:
+ solver.distupgrade = True
+ solver.updatesystem = True
+ solver.allowdowngrade = True
+ solver.allowvendorchange = True
+ solver.dosplitprovides = True
+ if cmd == 'up' and len(jobs) == 1 and jobs[0].how == (Job.SOLVER_UPDATE | Job.SOLVER_SOLVABLE_ALL):
+ solver.dosplitprovides = True
+ problems = solver.solve(jobs)
+ if not problems:
+ break
+ for problem in problems:
+ print "Problem %d:" % problem.id
+ r = problem.findproblemrule()
+ type, source, target, dep = r.info()
+ if type == Solver.SOLVER_RULE_DISTUPGRADE:
+ print "%s does not belong to a distupgrade repository" % source.str()
+ elif type == Solver.SOLVER_RULE_INFARCH:
+ print "%s has inferiour architecture" % source.str()
+ elif type == Solver.SOLVER_RULE_UPDATE:
+ print "problem with installed package %s" % source.str()
+ elif type == Solver.SOLVER_RULE_JOB:
+ print "conflicting requests"
+ elif type == Solver.SOLVER_RULE_JOB_NOTHING_PROVIDES_DEP:
+ print "nothing provides requested %s" % pool.dep2str(dep)
+ elif type == Solver.SOLVER_RULE_RPM:
+ print "some dependency problem"
+ elif type == Solver.SOLVER_RULE_RPM_NOT_INSTALLABLE:
+ print "package %s is not installable" % source.str()
+ elif type == Solver.SOLVER_RULE_RPM_NOTHING_PROVIDES_DEP:
+ print "nothing provides %s needed by %s" % (pool.dep2str(dep), source.str())
+ elif type == Solver.SOLVER_RULE_RPM_SAME_NAME:
+ print "cannot install both %s and %s" % (source.str(), target.str())
+ elif type == Solver.SOLVER_RULE_RPM_PACKAGE_CONFLICT:
+ print "package %s conflicts with %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
+ elif type == Solver.SOLVER_RULE_RPM_PACKAGE_OBSOLETES:
+ print "package %s obsoletes %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
+ elif type == Solver.SOLVER_RULE_RPM_INSTALLEDPKG_OBSOLETES:
+ print "installed package %s obsoletes %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
+ elif type == Solver.SOLVER_RULE_RPM_IMPLICIT_OBSOLETES:
+ print "package %s implicitely obsoletes %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
+ elif type == Solver.SOLVER_RULE_RPM_PACKAGE_REQUIRES:
+ print "package %s requires %s, but none of the providers can be installed" % (source.str(), pool.dep2str(dep))
+ elif type == Solver.SOLVER_RULE_RPM_SELF_CONFLICT:
+ print "package %s conflicts with %s provided by itself" % (source.str(), pool.dep2str(dep))
+ else:
+ print "bad rule type", type
+ solutions = problem.solutions()
+ for solution in solutions:
+ print " Solution %d:" % solution.id
+ elements = solution.elements()
+ for element in elements:
+ etype = element.type
+ if etype == Solver.SOLVER_SOLUTION_JOB:
+ print " - do not ask to", jobs[element.jobidx].str()
+ elif etype == Solver.SOLVER_SOLUTION_INFARCH:
+ if element.solvable.isinstalled():
+ print " - keep %s despite the inferior architecture" % element.solvable.str()
+ else:
+ print " - install %s despite the inferior architecture" % element.solvable.str()
+ elif etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
+ if element.solvable.isinstalled():
+ print " - keep obsolete %s" % element.solvable.str()
+ else:
+ print " - install %s from excluded repository" % element.solvable.str()
+ elif etype == Solver.SOLVER_SOLUTION_REPLACE:
+ illegal = element.illegalreplace()
+ if illegal & solver.POLICY_ILLEGAL_DOWNGRADE:
+ print " - allow downgrade of %s to %s" % (element.solvable.str(), element.replacement.str())
+ if illegal & solver.POLICY_ILLEGAL_ARCHCHANGE:
+ print " - allow architecture change of %s to %s" % (element.solvable.str(), element.replacement.str())
+ if illegal & solver.POLICY_ILLEGAL_VENDORCHANGE:
+ if element.replacement.vendorid:
+ print " - allow vendor change from '%s' (%s) to '%s' (%s)" % (element.solvable.vendor, element.solvable.str(), element.replacement.vendor, element.replacement.str())
+ else:
+ print " - allow vendor change from '%s' (%s) to no vendor (%s)" % (element.solvable.vendor, element.solvable.str(), element.replacement.str())
+ if illegal == 0:
+ print " - allow replacement of %s with %s" % (element.solvable.str(), element.replacement.str())
+ elif etype == Solver.SOLVER_SOLUTION_DEINSTALL:
+ print " - allow deinstallation of %s" % element.solvable.str()
+ sol = ''
+ while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
+ sys.stdout.write("Please choose a solution: ")
+ sys.stdout.flush()
+ sol = sys.stdin.readline().strip()
+ if sol == 's':
+ continue # skip problem
+ if sol == 'q':
+ sys.exit(1)
+ solution = solutions[int(sol) - 1]
+ for element in solution.elements():
+ etype = element.type
+ newjob = None
+ if etype == Solver.SOLVER_SOLUTION_JOB:
+ jobs[element.jobidx] = pool.Job(Job.SOLVER_NOOP, 0)
+ elif etype == Solver.SOLVER_SOLUTION_INFARCH or etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
+ newjob = pool.Job(Job.SOLVER_INSTALL|Job.SOLVER_SOLVABLE, element.solvable.id)
+ elif etype == Solver.SOLVER_SOLUTION_REPLACE:
+ newjob = pool.Job(Job.SOLVER_INSTALL|Job.SOLVER_SOLVABLE, element.replacement.id)
+ elif etype == Solver.SOLVER_SOLUTION_DEINSTALL:
+ newjob = pool.Job(Job.SOLVER_ERASE|Job.SOLVER_SOLVABLE, element.solvable.id)
+ if newjob:
+ for job in jobs:
+ if job.how == newjob.how and job.what == newjob.what:
+ newjob = None
+ break
+ if newjob:
+ jobs.append(newjob)
+ # no problems, show transaction
+ trans = solver.transaction()
+ del solver
+ if trans.isempty():
+ print "Nothing to do."
+ sys.exit(0)
+ print
+ print "Transaction summary:"
+ print
+ for ctype, pkgs, fromid, toid in trans.classify():
+ if ctype == Transaction.SOLVER_TRANSACTION_ERASE:
+ print "%d erased packages:" % len(pkgs)
+ elif ctype == Transaction.SOLVER_TRANSACTION_INSTALL:
+ print "%d installed packages:" % len(pkgs)
+ elif ctype == Transaction.SOLVER_TRANSACTION_REINSTALLED:
+ print "%d reinstalled packages:" % len(pkgs)
+ elif ctype == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
+ print "%d downgraded packages:" % len(pkgs)
+ elif ctype == Transaction.SOLVER_TRANSACTION_CHANGED:
+ print "%d changed packages:" % len(pkgs)
+ elif ctype == Transaction.SOLVER_TRANSACTION_UPGRADED:
+ print "%d upgraded packages:" % len(pkgs)
+ elif ctype == Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
+ print "%d vendor changes from '%s' to '%s':" % (len(pkgs), pool.id2str(fromid), pool.id2str(toid))
+ elif ctype == Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
+ print "%d arch changes from '%s' to '%s':" % (len(pkgs), pool.id2str(fromid), pool.id2str(toid))
+ else:
+ continue
+ for p in pkgs:
+ if ctype == Transaction.SOLVER_TRANSACTION_UPGRADED or ctype == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
+ op = trans.othersolvable(p)
+ print " - %s -> %s" % (p.str(), op.str())
+ else:
+ print " - %s" % p.str()
+ print
+ print "install size change: %d K" % trans.calc_installsizechange()
+ print
+
+# vim: sw=4 et
+ while True:
+ sys.stdout.write("OK to continue (y/n)? ")
+ sys.stdout.flush()
+ yn = sys.stdin.readline().strip()
+ if yn == 'y': break
+ if yn == 'n': sys.exit(1)
+ newpkgs, keptpkgs = trans.installedresult()
+ newpkgsfp = {}
+ if newpkgs:
+ downloadsize = 0
+ for p in newpkgs:
+ downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
+ print "Downloading %d packages, %d K" % (len(newpkgs), downloadsize)
+ for p in newpkgs:
+ repo = p.repo.appdata
+ location, medianr = p.lookup_location()
+ if not location:
+ continue
+ if repo['type'] == 'commandline':
+ f = solv.xfopen(location)
+ if not f:
+ sys.exit("\n%s: %s not found" % location)
+ newpkgsfp[p.id] = f
+ continue
+ if sysrepo['handle'].nsolvables and os.access('/usr/bin/applydeltarpm', os.X_OK):
+ pname = p.name
+ di = p.repo.dataiterator_new(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, Dataiterator.SEARCH_STRING)
+ di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
+ for d in di:
+ d.setpos_parent()
+ if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
+ continue
+ baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
+ candidate = None
+ for installedp in pool.providers(p.nameid):
+ if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
+ candidate = installedp
+ if not candidate:
+ continue
+ seq = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_EVR) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NUM)
+ st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
+ if st:
+ continue
+ chksum = pool.lookup_checksum(solv.SOLVID_POS, solv.DELTA_CHECKSUM)
+ if not chksum:
+ continue
+ dloc = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_DIR) + '/' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_EVR) + '.' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_SUFFIX)
+ f = repo.download(dloc, False, chksum)
+ if not f:
+ continue
+ nf = tempfile.TemporaryFile()
+ nf = os.dup(nf.fileno())
+ st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
+ solv.xfclose(f)
+ os.lseek(nf, 0, os.SEEK_SET)
+ newpkgsfp[p.id] = solv.xfopen_fd("", nf)
+ break
+ if p.id in newpkgsfp:
+ sys.stdout.write("d")
+ sys.stdout.flush()
+ continue
+
+ if repo['type'] == 'yast2':
+ datadir = repo['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DATADIR)
+ if not datadir:
+ datadir = 'suse'
+ location = datadir + '/' + location
+ chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
+ f = repo.download(location, False, chksum)
+ if not f:
+ sys.exit("\n%s: %s not found in repository" % (repo['alias'], location))
+ newpkgsfp[p.id] = f
+ sys.stdout.write(".")
+ sys.stdout.flush()
    print
    print "Committing transaction:"
    print
    # build the rpm transaction from the solver transaction steps
    ts = rpm.TransactionSet('/')
    # signature verification is skipped here (gpg checking not ported yet)
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
    # rpm's erase callback only reports the package *name*, so remember
    # the solvable for each erased name to print nice messages later
    erasenamehelper = {}
    for p in trans.steps():
        type = trans.steptype(p, Transaction.SOLVER_TRANSACTION_RPM_ONLY)
        if type == Transaction.SOLVER_TRANSACTION_ERASE:
            rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
            erasenamehelper[p.name] = p
            if not rpmdbid:
                sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p.str())
            ts.addErase(rpmdbid)
        elif type == Transaction.SOLVER_TRANSACTION_INSTALL:
            f = newpkgsfp[p.id]
            h = ts.hdrFromFdno(solv.xfileno(f))
            # rewind: the callback will re-read the package from the fd
            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
            ts.addInstall(h, p, 'u')
        elif type == Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
            f = newpkgsfp[p.id]
            h = ts.hdrFromFdno(solv.xfileno(f))
            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
            # 'i' installs in parallel to other versions ("multiinstall")
            ts.addInstall(h, p, 'i')
    checkproblems = ts.check()
    if checkproblems:
        print checkproblems
        sys.exit("Sorry.")
    ts.order()
    def runCallback(reason, amount, total, p, d):
        # rpm transaction callback: hands rpm an open fd for each package
        # being installed and prints install/erase progress lines.
        if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
            return solv.xfileno(newpkgsfp[p.id])
        if reason == rpm.RPMCALLBACK_INST_START:
            print "install", p.str()
        if reason == rpm.RPMCALLBACK_UNINST_START:
            # argh, p is just the name of the package
            if p in erasenamehelper:
                p = erasenamehelper[p]
                print "erase", p.str()
    # run the rpm transaction; rpm pulls package data via runCallback
    runproblems = ts.run(runCallback, '')
    if runproblems:
        print runproblems
        sys.exit(1)
    sys.exit(0)

# fallthrough: none of the known commands matched
print "unknown command", cmd
sys.exit(1)
diff --git a/examples/pysolv.py b/examples/pysolv.py
deleted file mode 100644
index c83310b..0000000
--- a/examples/pysolv.py
+++ /dev/null
@@ -1,976 +0,0 @@
-#!/usr/bin/python
-
-#
-# Copyright (c) 2011, Novell Inc.
-#
-# This program is licensed under the BSD license, read LICENSE.BSD
-# for further information
-#
-
-# pysolv a little software installer demoing the sat solver library/bindings
-
-# things it does:
-# - understands globs for package names / dependencies
-# - understands .arch suffix
-# - repository data caching
-# - on demand loading of secondary repository data
-# - checksum verification
-# - deltarpm support
-# - installation of commandline packages
-#
-# things not yet ported:
-# - gpg verification
-# - file conflicts
-# - fastestmirror implementation
-#
-# things available in the library but missing from pysolv:
-# - vendor policy loading
-# - soft locks file handling
-# - multi version handling
-
-import sys
-import os
-import glob
-import solv
-import re
-import tempfile
-import time
-import subprocess
-import fnmatch
-import rpm
-from stat import *
-from solv import Pool, Repo, Dataiterator, Job, Solver, Transaction
-from iniparse import INIConfig
-from optparse import OptionParser
-
-#import gc
-#gc.set_debug(gc.DEBUG_LEAK)
-
def calc_cookie_stat(stat):
    # Build a cache-validation cookie from file metadata (device, inode,
    # size, mtime) so that a changed file invalidates the cached data.
    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
    chksum.add("1.1")  # cookie format version
    chksum.add(str(stat[ST_DEV]))
    chksum.add(str(stat[ST_INO]))
    chksum.add(str(stat[ST_SIZE]))
    chksum.add(str(stat[ST_MTIME]))
    return chksum.raw()
-
def calc_cookie_fp(fp):
    # Cache-validation cookie over the full content of an open file
    # (e.g. repomd.xml or the susetags content file).
    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
    chksum.add_fp(fp)
    return chksum.raw()
-
def calccachepath(repo, repoext = None):
    # Map a repo alias (plus an optional extension id such as 'FL'/'DL')
    # to its cache file below /var/cache/solv.  A leading dot and any
    # slashes in the alias are replaced so the result is a plain filename.
    name = re.sub(r'^\.', '_', repo['alias'])
    suffix = "_" + repoext + ".solvx" if repoext else ".solv"
    return "/var/cache/solv/" + re.sub(r'[/]', '_', name + suffix)
-
def usecachedrepo(repo, repoext, mark=False):
    # Try to load cached data for a repo (or one of its extensions).
    # Cache layout: the last 32 bytes are the validation cookie; main
    # (non-@System) repo caches store an extra "extension cookie" in the
    # 32 bytes before that.  Returns True if the cache was valid & loaded.
    if not repoext:
        cookie = repo['cookie']
    else:
        cookie = repo['extcookie']
    handle = repo['handle']
    try:
        repopath = calccachepath(repo, repoext)
        f = open(repopath, 'r')
        f.seek(-32, os.SEEK_END)
        fcookie = f.read(32)
        if len(fcookie) != 32:
            return False
        if cookie and fcookie != cookie:
            return False
        if repo['alias'] != '@System' and not repoext:
            f.seek(-32 * 2, os.SEEK_END)
            fextcookie = f.read(32)
            if len(fextcookie) != 32:
                return False
        f.seek(0)
        flags = 0
        if repoext:
            # extension data extends the already loaded solvables
            flags = Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES
            if repoext != 'DL':
                flags |= Repo.REPO_LOCALPOOL
        if not repo['handle'].add_solv(f, flags):
            return False
        if repo['alias'] != '@System' and not repoext:
            repo['cookie'] = fcookie
            repo['extcookie'] = fextcookie
        if mark:
            # bump the cache file's mtime so expiry checks see it as fresh
            # no futimes in python?
            try:
                os.utime(repopath, None)
            except Exception, e:
                pass
    except IOError, e:
        return False
    return True
-
-def writecachedrepo(repo, repoext, info=None):
- try:
- if not os.path.isdir("/var/cache/solv"):
- os.mkdir("/var/cache/solv", 0755)
- (fd, tmpname) = tempfile.mkstemp(prefix='.newsolv-', dir='/var/cache/solv')
- os.fchmod(fd, 0444)
- f = os.fdopen(fd, 'w+')
- if not info:
- repo['handle'].write(f)
- elif repoext:
- info.write(f)
- else:
- # rewrite_repos case
- repo['handle'].write_first_repodata(f)
- if repo['alias'] != '@System' and not repoext:
- if 'extcookie' not in repo:
- # create unique id
- extcookie = calc_cookie_stat(os.fstat(f.fileno()))
- extcookie = ''.join(chr(ord(s)^ord(c)) for s,c in zip(extcookie, repo['cookie']))
- if ord(extcookie[0]) == 0:
- extcookie[0] = chr(1)
- repo['extcookie'] = extcookie
- f.write(repo['extcookie'])
- if not repoext:
- f.write(repo['cookie'])
- else:
- f.write(repo['extcookie'])
- f.close()
- if repo['handle'].iscontiguous():
- # switch to saved repo to activate paging and save memory
- nf = solv.xfopen(tmpname)
- if not repoext:
- # main repo
- repo['handle'].empty()
- if not repo['handle'].add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
- sys.exit("internal error, cannot reload solv file")
- else:
- # extension repodata
- # need to extend to repo boundaries, as this is how
- # info.write() has written the data
- info.extend_to_repo()
- # LOCALPOOL does not help as pool already contains all ids
- info.read_solv_flags(nf, Repo.REPO_EXTEND_SOLVABLES)
- solv.xfclose(nf)
- os.rename(tmpname, calccachepath(repo, repoext))
- except IOError, e:
- if tmpname:
- os.unlink(tmpname)
-
-def curlfopen(repo, file, uncompress, chksum, badchecksum=None):
- baseurl = repo['baseurl']
- url = re.sub(r'/$', '', baseurl) + '/' + file
- f = tempfile.TemporaryFile()
- st = subprocess.call(['curl', '-f', '-s', '-L', url], stdout=f.fileno())
- if os.lseek(f.fileno(), 0, os.SEEK_CUR) == 0 and (st == 0 or not chksum):
- return None
- os.lseek(f.fileno(), 0, os.SEEK_SET)
- if st:
- print "%s: download error %d" % (file, st)
- if badchecksum:
- badchecksum['True'] = 'True'
- return None
- if chksum:
- fchksum = solv.Chksum(chksum.type)
- if not fchksum:
- print "%s: unknown checksum type" % file
- if badchecksum:
- badchecksum['True'] = 'True'
- return None
- fchksum.add_fd(f.fileno())
- if not fchksum.matches(chksum):
- print "%s: checksum mismatch" % file
- if badchecksum:
- badchecksum['True'] = 'True'
- return None
- if uncompress:
- return solv.xfopen_fd(file, os.dup(f.fileno()))
- return solv.xfopen_fd("", os.dup(f.fileno()))
-
def repomd_find(repo, what):
    # Look up a data file entry (e.g. 'primary', 'filelists') in the
    # parsed repomd.xml metadata.  Returns (location, checksum) or
    # (None, None); entries without a checksum are refused.
    di = repo['handle'].dataiterator_new(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE, what, Dataiterator.SEARCH_STRING)
    di.prepend_keyname(solv.REPOSITORY_REPOMD)
    for d in di:
        d.setpos_parent()
        filename = d.pool.lookup_str(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_LOCATION)
        chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_CHECKSUM)
        if filename and not chksum:
            # never use files we cannot verify
            print "no %s file checksum!" % filename
            filename = None
            chksum = None
        if filename:
            return (filename, chksum)
    return (None, None)
-
def repomd_add_ext(repo, repodata, what, ext):
    # Register a stub entry for rpm-md extension metadata (deltainfo or
    # filelists) so it can be demand-loaded later via load_stub().
    # Returns False when the repo does not provide that data.
    filename, chksum = repomd_find(repo, what)
    if not filename:
        return False
    if what == 'prestodelta':
        # prestodelta is just fedora's name for deltainfo
        what = 'deltainfo'
    handle = repodata.new_handle()
    repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
    repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
    repodata.set_bin_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksum)
    # announce which keys the stub will provide once loaded
    if what == 'deltainfo':
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
    elif what == 'filelists':
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
    repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
    return True
-
def repomd_add_exts(repo):
    # Add stubs for all supported rpm-md extension metadata
    # (deltainfo/prestodelta and filelists).
    repodata = repo['handle'].add_repodata(0)
    if not repomd_add_ext(repo, repodata, 'deltainfo', 'DL'):
        repomd_add_ext(repo, repodata, 'prestodelta', 'DL')
    repomd_add_ext(repo, repodata, 'filelists', 'FL')
    repodata.internalize()
-
def repomd_load_ext(repo, repodata):
    # load_stub half for rpm-md repos: fetch and add one extension
    # (filelists 'FL' or deltainfo 'DL'), preferring the disk cache.
    repomdtype = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE)
    if repomdtype == 'filelists':
        ext = 'FL'
    elif repomdtype == 'deltainfo':
        ext = 'DL'
    else:
        return False
    sys.stdout.write("[%s:%s" % (repo['alias'], ext))
    if usecachedrepo(repo, ext):
        sys.stdout.write(" cached]\n")
        sys.stdout.flush()
        return True
    sys.stdout.write(" fetching]\n")
    sys.stdout.flush()
    filename = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_LOCATION)
    filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.REPOSITORY_REPOMD_CHECKSUM)
    f = curlfopen(repo, filename, True, filechksum)
    if not f:
        return False
    if ext == 'FL':
        repo['handle'].add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
    elif ext == 'DL':
        repo['handle'].add_deltainfoxml(f, Repo.REPO_USE_LOADING)
    solv.xfclose(f)
    # cache for the next run
    writecachedrepo(repo, ext, repodata)
    return True
-
def susetags_find(repo, what):
    # Find a description file entry in the susetags 'content' metadata.
    # Returns (filename, checksum) for the first match, (None, None)
    # when the repo does not list the file.
    di = repo['handle'].dataiterator_new(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
    di.prepend_keyname(solv.SUSETAGS_FILE)
    for d in di:
        d.setpos_parent()
        chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.SUSETAGS_FILE_CHECKSUM)
        return (what, chksum)
    return (None, None)
-
def susetags_add_ext(repo, repodata, what, ext):
    # Register a stub for one susetags description file ("packages.XX").
    # 'DU' is disk usage, 'FL' is file lists, anything else is treated
    # as a translation language code.
    (filename, chksum) = susetags_find(repo, what)
    if not filename:
        return False
    handle = repodata.new_handle()
    repodata.set_str(handle, solv.SUSETAGS_FILE_NAME, filename)
    if chksum:
        repodata.set_bin_checksum(handle, solv.SUSETAGS_FILE_CHECKSUM, chksum)
    if ext == 'DU':
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
    elif ext == 'FL':
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
        repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
    else:
        # language extension: announce localized variants of these keys
        for langtag, langtagtype in [
            (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
            (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
            (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
            (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
            (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
            (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
        ]:
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, repo['handle'].pool.id2langid(langtag, ext, 1))
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
    repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
    return True
-
def susetags_add_exts(repo):
    # Scan the content metadata for "packages.*" files and add a stub
    # for each usable extension (two-letter suffix, optionally followed
    # by a compression suffix).  English data is skipped: it is already
    # part of the main packages file.
    repodata = repo['handle'].add_repodata(0)
    di = repo['handle'].dataiterator_new(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, None, 0)
    di.prepend_keyname(solv.SUSETAGS_FILE)
    for d in di:
        filename = d.match_str()
        if not filename:
            continue
        if filename[0:9] != "packages.":
            continue
        if len(filename) == 11 and filename != "packages.gz":
            # "packages.XX"
            ext = filename[9:11]
        elif filename[11:12] == ".":
            # "packages.XX.<compression>"
            ext = filename[9:11]
        else:
            continue
        if ext == "en":
            continue
        susetags_add_ext(repo, repodata, filename, ext)
    repodata.internalize()
-
def susetags_load_ext(repo, repodata):
    # load_stub half for susetags repos: fetch and add one packages.XX
    # extension file, preferring the disk cache.
    filename = repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME)
    # extension id is the two letters after "packages."
    ext = filename[9:11]
    sys.stdout.write("[%s:%s" % (repo['alias'], ext))
    if usecachedrepo(repo, ext):
        sys.stdout.write(" cached]\n")
        sys.stdout.flush()
        return True
    sys.stdout.write(" fetching]\n")
    sys.stdout.flush()
    defvendorid = repo['handle'].lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
    descrdir = repo['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
    if not descrdir:
        descrdir = "suse/setup/descr"
    filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.SUSETAGS_FILE_CHECKSUM)
    f = curlfopen(repo, descrdir + '/' + filename, True, filechksum)
    if not f:
        return False
    repo['handle'].add_susetags(f, defvendorid, None, Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
    solv.xfclose(f)
    # cache for the next run
    writecachedrepo(repo, ext, repodata)
    return True
-
def validarch(pool, arch):
    # Return whether "arch" is a non-empty string naming an architecture
    # the pool knows about.
    if not arch:
        return False
    archid = pool.str2id(arch, False)
    if not archid:
        return False
    return pool.isknownarch(archid)
-
def limitjobs(pool, jobs, flags, evr):
    # Return copies of "jobs" with their target constrained by a relation
    # (e.g. REL_ARCH against an arch id, or REL_EQ/LT/GT against an evr),
    # setting the matching SOLVER_SET* flags so the solver knows the user
    # explicitly fixed these aspects.
    njobs = []
    for j in jobs:
        how = j.how
        sel = how & Job.SOLVER_SELECTMASK
        what = pool.rel2id(j.what, evr, flags)
        if flags == solv.REL_ARCH:
            how |= Job.SOLVER_SETARCH
        if flags == solv.REL_EQ and sel == Job.SOLVER_SOLVABLE_NAME:
            # a dash in the evr means a release part was given too
            if pool.id2str(evr).find('-') >= 0:
                how |= Job.SOLVER_SETEVR
            else:
                how |= Job.SOLVER_SETEV
        njobs.append(pool.Job(how, what))
    return njobs
-
def limitjobs_arch(pool, jobs, flags, evr):
    # Like limitjobs(), but also honors a trailing ".arch" suffix on the
    # evr string (e.g. "1.0-1.x86_64").
    m = re.match(r'(.+)\.(.+?)$', evr)
    if m and validarch(pool, m.group(2)):
        archjobs = limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
        return limitjobs(pool, archjobs, flags, pool.str2id(m.group(1)))
    return limitjobs(pool, jobs, flags, pool.str2id(evr))
-
def mkjobs(pool, cmd, arg):
    # Translate one command line argument into a list of solver jobs.
    # Understands: absolute file paths (matched against file lists),
    # "name <op> evr" relations, globs, and name[.arch] / name-evr forms.
    # Returns [] when nothing matches.
    if len(arg) and arg[0] == '/':
        # file path: search the file lists
        if re.search(r'[[*?]', arg):
            type = Dataiterator.SEARCH_GLOB
        else:
            type = Dataiterator.SEARCH_STRING
        if cmd == 'rm' or cmd == 'erase':
            # erase only ever matches installed packages
            di = pool.installed.dataiterator_new(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
        else:
            di = pool.dataiterator_new(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
        matches = []
        for d in di:
            s = d.solvable
            if s and s.installable():
                matches.append(s.id)
                di.skip_solvable() # one match is enough
        if len(matches):
            print "[using file list match for '%s']" % arg
            if len(matches) > 1:
                return [ pool.Job(Job.SOLVER_SOLVABLE_ONE_OF, pool.towhatprovides(matches)) ]
            else:
                return [ pool.Job(Job.SOLVER_SOLVABLE | Job.SOLVER_NOAUTOSET, matches[0]) ]
    m = re.match(r'(.+?)\s*([<=>]+)\s*(.+?)$', arg)
    if m:
        # "name <op> evr" relation
        (name, rel, evr) = m.group(1, 2, 3)
        flags = 0
        if rel.find('<') >= 0: flags |= solv.REL_LT
        if rel.find('=') >= 0: flags |= solv.REL_EQ
        if rel.find('>') >= 0: flags |= solv.REL_GT
        jobs = depglob(pool, name, True, True)
        if len(jobs):
            return limitjobs(pool, jobs, flags, pool.str2id(evr))
        m = re.match(r'(.+)\.(.+?)$', name)
        if m and validarch(pool, m.group(2)):
            # "name.arch <op> evr"
            jobs = depglob(pool, m.group(1), True, True)
            if len(jobs):
                jobs = limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
                return limitjobs(pool, jobs, flags, pool.str2id(evr))
    else:
        # plain name or glob; then progressively try name.arch,
        # name-version and name-version-release splits
        jobs = depglob(pool, arg, True, True)
        if len(jobs):
            return jobs
        m = re.match(r'(.+)\.(.+?)$', arg)
        if m and validarch(pool, m.group(2)):
            jobs = depglob(pool, m.group(1), True, True)
            if len(jobs):
                return limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
        m = re.match(r'(.+)-(.+?)$', arg)
        if m:
            jobs = depglob(pool, m.group(1), True, False)
            if len(jobs):
                return limitjobs_arch(pool, jobs, solv.REL_EQ, m.group(2))
        m = re.match(r'(.+)-(.+?-.+?)$', arg)
        if m:
            jobs = depglob(pool, m.group(1), True, False)
            if len(jobs):
                return limitjobs_arch(pool, jobs, solv.REL_EQ, m.group(2))
    return []
-
-
def depglob(pool, name, globname, globdep):
    # Create jobs for a (possibly globbed) name.  globname allows
    # matching solvable names, globdep allows matching provided
    # capabilities.  Exact matches win over glob matches.
    id = pool.str2id(name, False)
    if id:
        # the string is a known id: check if it names or provides packages
        match = False
        for s in pool.providers(id):
            if globname and s.nameid == id:
                return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) ]
            match = True
        if match:
            if globname and globdep:
                print "[using capability match for '%s']" % name
            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) ]
    if not re.search(r'[[*?]', name):
        # no glob characters, nothing more to try
        return []
    if globname:
        # try name glob
        idmatches = {}
        for s in pool.solvables:
            if s.installable() and fnmatch.fnmatch(s.name, name):
                idmatches[s.nameid] = True
        if len(idmatches):
            return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) for id in sorted(idmatches.keys()) ]
    if globdep:
        # try dependency glob
        idmatches = {}
        for id in pool.allprovidingids():
            if fnmatch.fnmatch(pool.id2str(id), name):
                idmatches[id] = True
        if len(idmatches):
            print "[using capability match for '%s']" % name
            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches.keys()) ]
    return []
-
-
def load_stub(repodata):
    # Pool demand-load callback: dispatch stub repodata loading to the
    # rpm-md or susetags loader depending on which metadata key is set.
    appdata = repodata.repo.appdata
    if repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE):
        return repomd_load_ext(appdata, repodata)
    if repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME):
        return susetags_load_ext(appdata, repodata)
    return False
-
def rewrite_repos(pool, addedprovides):
    # Persist newly computed file provides into the cached repo data so
    # the next run does not need to recompute them.  Only rewrites repos
    # whose cache does not already contain all of them.
    addedprovidesset = set(addedprovides)
    for repohandle in pool.repos:
        repo = repohandle.appdata
        if 'cookie' not in repo: # ignore commandlinerepo
            continue
        if not repohandle.nsolvables:
            continue
        # make sure there's just one real repodata with extensions
        repodata = repohandle.first_repodata()
        if not repodata:
            continue
        oldaddedprovides = repodata.lookup_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES)
        oldaddedprovidesset = set(oldaddedprovides)
        if not addedprovidesset <= oldaddedprovidesset:
            for id in addedprovides:
                repodata.add_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES, id)
            repodata.internalize()
            writecachedrepo(repo, None, repodata)
-
-
# command line: solv.py [options] COMMAND [args...]
parser = OptionParser(usage="usage: solv.py [options] COMMAND")
(options, args) = parser.parse_args()
if not args:
    parser.print_help(sys.stderr)
    sys.exit(1)

cmd = args[0]
args = args[1:]

# global pool: host architecture and demand-load callback for stub data
pool = solv.Pool()
pool.setarch(os.uname()[4])
pool.set_loadcallback(load_stub)
-
-repos = []
-for reposdir in ["/etc/zypp/repos.d"]:
- if not os.path.isdir(reposdir):
- continue
- for reponame in sorted(glob.glob('%s/*.repo' % reposdir)):
- cfg = INIConfig(open(reponame))
- for alias in cfg:
- repo = cfg[alias]
- repo['alias'] = alias
- if 'baseurl' not in repo:
- print "repo %s has no baseurl" % alias
- continue
- if 'priority' not in repo:
- repo['priority'] = 99
- if 'autorefresh' not in repo:
- repo['autorefresh'] = 1
- if 'type' not in repo:
- repo['type'] = 'rpm-md'
- repo['metadata_expire'] = 900
- repos.append(repo)
-
-print "rpm database:",
-sysrepo = { 'alias': '@System' }
-sysrepo['handle'] = pool.add_repo(sysrepo['alias'])
-sysrepo['handle'].appdata = sysrepo
-pool.installed = sysrepo['handle']
-sysrepostat = os.stat("/var/lib/rpm/Packages")
-sysrepocookie = calc_cookie_stat(sysrepostat)
-sysrepo['cookie'] = sysrepocookie
-if usecachedrepo(sysrepo, None):
- print "cached"
-else:
- print "reading"
- sysrepo['handle'].add_products("/etc/products.d", Repo.REPO_NO_INTERNALIZE)
- sysrepo['handle'].add_rpmdb(None)
- writecachedrepo(sysrepo, None)
-
-for repo in repos:
- if not int(repo.enabled):
- continue
- repo['handle'] = pool.add_repo(repo['alias'])
- repo['handle'].appdata = repo
- repo['handle'].priority = 99 - repo['priority']
- if repo['autorefresh']:
- dorefresh = True
- if dorefresh:
- try:
- st = os.stat(calccachepath(repo))
- if time.time() - st[ST_MTIME] < repo['metadata_expire']:
- dorefresh = False
- except OSError, e:
- pass
- repo['cookie'] = None
- if not dorefresh and usecachedrepo(repo, None):
- print "repo: '%s': cached" % repo['alias']
- continue
-
- badchecksum = {}
-
- if repo['type'] == 'rpm-md':
- print "rpmmd repo '%s':" % repo['alias'],
- sys.stdout.flush()
- f = curlfopen(repo, "repodata/repomd.xml", False, None, None)
- if not f:
- print "no repomd.xml file, skipped"
- repo['handle'].free(True)
- del repo['handle']
- continue
- repo['cookie'] = calc_cookie_fp(f)
- if usecachedrepo(repo, None, True):
- print "cached"
- solv.xfclose(f)
- continue
- repo['handle'].add_repomdxml(f, 0)
- solv.xfclose(f)
- print "fetching"
- (filename, filechksum) = repomd_find(repo, 'primary')
- if filename:
- f = curlfopen(repo, filename, True, filechksum, badchecksum)
- if f:
- repo['handle'].add_rpmmd(f, None, 0)
- solv.xfclose(f)
- if badchecksum:
- continue # hopeless, need good primary
- (filename, filechksum) = repomd_find(repo, 'updateinfo')
- if filename:
- f = curlfopen(repo, filename, True, filechksum, badchecksum)
- if f:
- repo['handle'].add_updateinfoxml(f, 0)
- solv.xfclose(f)
- repomd_add_exts(repo)
- elif repo['type'] == 'yast2':
- print "susetags repo '%s':" % repo['alias'],
- sys.stdout.flush()
- f = curlfopen(repo, "content", False, None, None)
- if not f:
- print "no content file, skipped"
- repo['handle'].free(True)
- del repo['handle']
- continue
- repo['cookie'] = calc_cookie_fp(f)
- if usecachedrepo(repo, None, True):
- print "cached"
- solv.xfclose(f)
- continue
- repo['handle'].add_content(f, 0)
- solv.xfclose(f)
- print "fetching"
- defvendorid = repo['handle'].lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
- descrdir = repo['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
- if not descrdir:
- descrdir = "suse/setup/descr"
- (filename, filechksum) = susetags_find(repo, 'packages.gz')
- if not filename:
- (filename, filechksum) = susetags_find(repo, 'packages')
- if filename:
- f = curlfopen(repo, descrdir + '/' + filename, True, filechksum, badchecksum)
- if f:
- repo['handle'].add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.SUSETAGS_RECORD_SHARES)
- solv.xfclose(f)
- (filename, filechksum) = susetags_find(repo, 'packages.en.gz')
- if not filename:
- (filename, filechksum) = susetags_find(repo, 'packages.en')
- if filename:
- f = curlfopen(repo, descrdir + '/' + filename, True, filechksum, badchecksum)
- if f:
- repo['handle'].add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.REPO_REUSE_REPODATA|Repo.REPO_EXTEND_SOLVABLES)
- solv.xfclose(f)
- repo['handle'].internalize()
- susetags_add_exts(repo)
- else:
- print "unsupported repo '%s': skipped" % repo['alias']
- repo['handle'].free(True)
- del repo['handle']
- continue
-
- # if the checksum was bad we work with the data we got, but don't cache it
- if 'True' not in badchecksum:
- writecachedrepo(repo, None)
- # must be called after writing the repo
- repo['handle'].create_stubs()
-
-if cmd == 'se' or cmd == 'search':
- matches = {}
- di = pool.dataiterator_new(0, solv.SOLVABLE_NAME, args[0], Dataiterator.SEARCH_SUBSTRING|Dataiterator.SEARCH_NOCASE)
- for d in di:
- matches[di.solvid] = True
- for solvid in sorted(matches.keys()):
- print " - %s [%s]: %s" % (pool.solvid2str(solvid), pool.solvables[solvid].repo.name, pool.lookup_str(solvid, solv.SOLVABLE_SUMMARY))
- sys.exit(0)
-
-cmdlinerepo = None
-cmdlinepkgs = {}
-if cmd == 'li' or cmd == 'list' or cmd == 'info' or cmd == 'in' or cmd == 'install':
- for arg in args:
- if arg.endswith(".rpm") and os.access(arg, os.R_OK):
- if not cmdlinerepo:
- cmdlinerepo = { 'alias': '@commandline' }
- cmdlinerepo['handle'] = pool.add_repo(cmdlinerepo['alias'])
- cmdlinerepo['handle'].appdata = cmdlinerepo
- cmdlinepkgs[arg] = cmdlinerepo['handle'].add_rpm(arg, Repo.REPO_REUSE_REPODATA|Repo.REPO_NO_INTERNALIZE)
- if cmdlinerepo:
- cmdlinerepo['handle'].internalize()
-
-addedprovides = pool.addfileprovides_ids()
-if addedprovides:
- rewrite_repos(pool, addedprovides)
-pool.createwhatprovides()
-
-jobs = []
-for arg in args:
- if cmdlinerepo and arg in cmdlinepkgs:
- jobs.append(pool.Job(Job.SOLVER_SOLVABLE, cmdlinepkgs[arg]))
- continue
- argjob = mkjobs(pool, cmd, arg)
- jobs += argjob
-
-if cmd == 'li' or cmd == 'list' or cmd == 'info':
- if not jobs:
- print "no package matched."
- sys.exit(1)
- for job in jobs:
- for s in pool.jobsolvables(job):
- if cmd == 'info':
- print "Name: %s" % s.str()
- print "Repo: %s" % s.repo.name
- print "Summary: %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
- str = s.lookup_str(solv.SOLVABLE_URL)
- if str:
- print "Url: %s" % str
- str = s.lookup_str(solv.SOLVABLE_LICENSE)
- if str:
- print "License: %s" % str
- print "Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION)
- print
- else:
- print " - %s [%s]" % (s.str(), s.repo.name)
- print " %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
- sys.exit(0)
-
-if cmd == 'in' or cmd == 'install' or cmd == 'rm' or cmd == 'erase' or cmd == 'up':
- if cmd == 'up' and not jobs:
- jobs = [ pool.Job(Job.SOLVER_SOLVABLE_ALL, 0) ]
- if not jobs:
- print "no package matched."
- sys.exit(1)
- for job in jobs:
- if cmd == 'up':
- if job.how == Job.SOLVER_SOLVABLE_ALL or filter(lambda s: s.isinstalled(), pool.jobsolvables(job)):
- job.how |= Job.SOLVER_UPDATE
- else:
- job.how |= Job.SOLVER_INSTALL
- if cmd == 'in' or cmd == 'install':
- job.how |= Job.SOLVER_INSTALL
- elif cmd == 'rm' or cmd == 'erase':
- job.how |= Job.SOLVER_ERASE
-
- #pool.set_debuglevel(2)
- solver = None
- while True:
- solver = pool.create_solver()
- solver.ignorealreadyrecommended = True
- if cmd == 'rm' or cmd == 'erase':
- solver.allowuninstall = True
- problems = solver.solve(jobs)
- if not problems:
- break
- for problem in problems:
- print "Problem %d:" % problem.id
- r = problem.findproblemrule()
- type, source, target, dep = r.info()
- if type == Solver.SOLVER_RULE_DISTUPGRADE:
- print "%s does not belong to a distupgrade repository" % source.str()
- elif type == Solver.SOLVER_RULE_INFARCH:
- print "%s has inferiour architecture" % source.str()
- elif type == Solver.SOLVER_RULE_UPDATE:
- print "problem with installed package %s" % source.str()
- elif type == Solver.SOLVER_RULE_JOB:
- print "conflicting requests"
- elif type == Solver.SOLVER_RULE_JOB_NOTHING_PROVIDES_DEP:
- print "nothing provides requested %s" % pool.dep2str(dep)
- elif type == Solver.SOLVER_RULE_RPM:
- print "some dependency problem"
- elif type == Solver.SOLVER_RULE_RPM_NOT_INSTALLABLE:
- print "package %s is not installable" % source.str()
- elif type == Solver.SOLVER_RULE_RPM_NOTHING_PROVIDES_DEP:
- print "nothing provides %s needed by %s" % (pool.dep2str(dep), source.str())
- elif type == Solver.SOLVER_RULE_RPM_SAME_NAME:
- print "cannot install both %s and %s" % (source.str(), target.str())
- elif type == Solver.SOLVER_RULE_RPM_PACKAGE_CONFLICT:
- print "package %s conflicts with %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
- elif type == Solver.SOLVER_RULE_RPM_PACKAGE_OBSOLETES:
- print "package %s obsoletes %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
- elif type == Solver.SOLVER_RULE_RPM_INSTALLEDPKG_OBSOLETES:
- print "installed package %s obsoletes %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
- elif type == Solver.SOLVER_RULE_RPM_IMPLICIT_OBSOLETES:
- print "package %s implicitely obsoletes %s provided by %s" % (source.str(), pool.dep2str(dep), target.str())
- elif type == Solver.SOLVER_RULE_RPM_PACKAGE_REQUIRES:
- print "package %s requires %s, but none of the providers can be installed" % (source.str(), pool.dep2str(dep))
- elif type == Solver.SOLVER_RULE_RPM_SELF_CONFLICT:
- print "package %s conflicts with %s provided by itself" % (source.str(), pool.dep2str(dep))
- else:
- print "bad rule type", type
- solutions = problem.solutions()
- for solution in solutions:
- print " Solution %d:" % solution.id
- elements = solution.elements()
- for element in elements:
- etype = element.type
- if etype == Solver.SOLVER_SOLUTION_JOB:
- print " - remove job %d" % element.jobidx
- elif etype == Solver.SOLVER_SOLUTION_INFARCH:
- if element.solvable.isinstalled():
- print " - keep %s despite the inferior architecture" % element.solvable.str()
- else:
- print " - install %s despite the inferior architecture" % element.solvable.str()
- elif etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
- if element.solvable.isinstalled():
- print " - keep obsolete %s" % element.solvable.str()
- else:
- print " - install %s from excluded repository" % element.solvable.str()
- elif etype == Solver.SOLVER_SOLUTION_REPLACE:
- print " - allow replacement of %s with %s" % (element.solvable.str(), element.replacement.str())
- elif etype == Solver.SOLVER_SOLUTION_DEINSTALL:
- print " - allow deinstallation of %s" % element.solvable.str()
- sol = ''
- while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
- sys.stdout.write("Please choose a solution: ")
- sys.stdout.flush()
- sol = sys.stdin.readline().strip()
- if sol == 's':
- continue # skip problem
- if sol == 'q':
- sys.exit(1)
- solution = solutions[int(sol) - 1]
- for element in solution.elements():
- etype = element.type
- newjob = None
- if etype == Solver.SOLVER_SOLUTION_JOB:
- jobs[element.jobidx] = pool.Job(Job.SOLVER_NOOP, 0)
- elif etype == Solver.SOLVER_SOLUTION_INFARCH or etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
- newjob = pool.Job(Job.SOLVER_INSTALL|Job.SOLVER_SOLVABLE, element.solvable.id)
- elif etype == Solver.SOLVER_SOLUTION_REPLACE:
- newjob = pool.Job(Job.SOLVER_INSTALL|Job.SOLVER_SOLVABLE, element.replacement.id)
- elif etype == Solver.SOLVER_SOLUTION_DEINSTALL:
- newjob = pool.Job(Job.SOLVER_ERASE|Job.SOLVER_SOLVABLE, element.solvable.id)
- if newjob:
- for job in jobs:
- if job.how == newjob.how and job.what == newjob.what:
- newjob = None
- break
- if newjob:
- jobs.append(newjob)
- # no problems, show transaction
- trans = solver.transaction()
- del solver
- if trans.isempty():
- print "Nothing to do."
- sys.exit(0)
- print
- print "Transaction summary:"
- print
- for ctype, pkgs, fromid, toid in trans.classify():
- if ctype == Transaction.SOLVER_TRANSACTION_ERASE:
- print "%d erased packages:" % len(pkgs)
- elif ctype == Transaction.SOLVER_TRANSACTION_INSTALL:
- print "%d installed packages:" % len(pkgs)
- elif ctype == Transaction.SOLVER_TRANSACTION_REINSTALLED:
- print "%d reinstalled packages:" % len(pkgs)
- elif ctype == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
- print "%d downgraded packages:" % len(pkgs)
- elif ctype == Transaction.SOLVER_TRANSACTION_CHANGED:
- print "%d changed packages:" % len(pkgs)
- elif ctype == Transaction.SOLVER_TRANSACTION_UPGRADED:
- print "%d upgraded packages:" % len(pkgs)
- elif ctype == Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
- print "%d vendor changes from '%s' to '%s':" % (len(pkgs), pool.id2str(fromid), pool.id2str(toid))
- elif ctype == Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
- print "%d arch changes from '%s' to '%s':" % (len(pkgs), pool.id2str(fromid), pool.id2str(toid))
- else:
- continue
- for p in pkgs:
- if ctype == Transaction.SOLVER_TRANSACTION_UPGRADED or ctype == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
- op = trans.othersolvable(p)
- print " - %s -> %s" % (p.str(), op.str())
- else:
- print " - %s" % p.str()
- print
- print "install size change: %d K" % trans.calc_installsizechange()
- print
-
-# vim: sw=4 et
- while True:
- sys.stdout.write("OK to continue (y/n)? ")
- sys.stdout.flush()
- yn = sys.stdin.readline().strip()
- if yn == 'y': break
- if yn == 'n': sys.exit(1)
- newpkgs, keptpkgs = trans.installedresult()
- newpkgsfp = {}
- if newpkgs:
- downloadsize = 0
- for p in newpkgs:
- downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
- print "Downloading %d packages, %d K" % (len(newpkgs), downloadsize)
- for p in newpkgs:
- repo = p.repo.appdata
- location, medianr = p.lookup_location()
- if not location:
- continue
- if repo == cmdlinerepo:
- f = solv.xfopen(location)
- if not f:
- sys.exit("\n%s: %s not found" % location)
- newpkgsfp[p.id] = f
- continue
- if sysrepo['handle'].nsolvables and os.access('/usr/bin/applydeltarpm', os.X_OK):
- pname = p.name
- di = p.repo.dataiterator_new(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, Dataiterator.SEARCH_STRING)
- di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
- for d in di:
- d.setpos_parent()
- if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
- continue
- baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
- candidate = None
- for installedp in pool.providers(p.nameid):
- if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
- candidate = installedp
- if not candidate:
- continue
- seq = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_EVR) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NUM)
- st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
- if st:
- continue
- chksum = pool.lookup_checksum(solv.SOLVID_POS, solv.DELTA_CHECKSUM)
- if not chksum:
- continue
- dloc = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_DIR) + '/' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_EVR) + '.' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_SUFFIX)
- f = curlfopen(repo, dloc, False, chksum)
- if not f:
- continue
- nf = tempfile.TemporaryFile()
- nf = os.dup(nf.fileno())
- st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
- solv.xfclose(f)
- os.lseek(nf, 0, os.SEEK_SET)
- newpkgsfp[p.id] = solv.xfopen_fd("", nf)
- break
- if p.id in newpkgsfp:
- sys.stdout.write("d")
- sys.stdout.flush()
- continue
-
- if repo['type'] == 'yast2':
- datadir = repo['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DATADIR)
- if not datadir:
- datadir = 'suse'
- location = datadir + '/' + location
- chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
- f = curlfopen(repo, location, False, chksum)
- if not f:
- sys.exit("\n%s: %s not found in repository" % (repo['alias'], location))
- newpkgsfp[p.id] = f
- sys.stdout.write(".")
- sys.stdout.flush()
- print
- print "Committing transaction:"
- print
- ts = rpm.TransactionSet('/')
- ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
- erasenamehelper = {}
- for p in trans.steps():
- type = trans.steptype(p, Transaction.SOLVER_TRANSACTION_RPM_ONLY)
- if type == Transaction.SOLVER_TRANSACTION_ERASE:
- rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
- erasenamehelper[p.name] = p
- if not rpmdbid:
- sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p.str())
- ts.addErase(rpmdbid)
- elif type == Transaction.SOLVER_TRANSACTION_INSTALL:
- f = newpkgsfp[p.id]
- h = ts.hdrFromFdno(solv.xfileno(f))
- os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
- ts.addInstall(h, p, 'u')
- elif type == Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
- f = newpkgsfp[p.id]
- h = ts.hdrFromFdno(solv.xfileno(f))
- os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
- ts.addInstall(h, p, 'i')
- checkproblems = ts.check()
- if checkproblems:
- print checkproblems
- sys.exit("Sorry.")
- ts.order()
- def runCallback(reason, amount, total, p, d):
- if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
- return solv.xfileno(newpkgsfp[p.id])
- if reason == rpm.RPMCALLBACK_INST_START:
- print "install", p.str()
- if reason == rpm.RPMCALLBACK_UNINST_START:
- # argh, p is just the name of the package
- if p in erasenamehelper:
- p = erasenamehelper[p]
- print "erase", p.str()
- runproblems = ts.run(runCallback, '')
- if runproblems:
- print runproblems
- sys.exit(1)
diff --git a/examples/solv.c b/examples/solv.c
index d8cd4c7..f33b8c8 100644
--- a/examples/solv.c
+++ b/examples/solv.c
@@ -665,23 +665,8 @@ findmetalinkurl(FILE *fp, unsigned char *chksump, Id *chksumtypep)
bp++;
if (chksumtypep && !*chksumtypep && !strncmp(bp, "<hash type=\"sha256\">", 20))
{
- int i;
-
bp += 20;
- memset(chksump, 0, 32);
- for (i = 0; i < 64; i++)
- {
- int c = *bp++;
- if (c >= '0' && c <= '9')
- chksump[i / 2] = chksump[i / 2] * 16 + (c - '0');
- else if (c >= 'a' && c <= 'f')
- chksump[i / 2] = chksump[i / 2] * 16 + (c - ('a' - 10));
- else if (c >= 'A' && c <= 'F')
- chksump[i / 2] = chksump[i / 2] * 16 + (c - ('A' - 10));
- else
- break;
- }
- if (i == 64)
+ if (sat_hex2bin((const char **)&bp, chksump, 32) == 32)
*chksumtypep = REPOKEY_TYPE_SHA256;
continue;
}
@@ -762,6 +747,13 @@ findmirrorlisturl(FILE *fp)
return 0;
}
+static inline int
+iscompressed(const char *name)
+{
+ int l = strlen(name);
+ return l > 3 && !strcmp(name + l - 3, ".gz") ? 1 : 0;
+}
+
FILE *
curlfopen(struct repoinfo *cinfo, const char *file, int uncompress, const unsigned char *chksum, Id chksumtype, int *badchecksump)
{
@@ -1011,11 +1003,11 @@ char *calccachepath(Repo *repo, const char *repoext)
char *q, *p = pool_tmpjoin(repo->pool, SOLVCACHE_PATH, "/", repo->name);
if (repoext)
{
- p = pool_tmpjoin(repo->pool, p, "_", repoext);
- p = pool_tmpjoin(repo->pool, p, ".solvx", 0);
+ p = pool_tmpappend(repo->pool, p, "_", repoext);
+ p = pool_tmpappend(repo->pool, p, ".solvx", 0);
}
else
- p = pool_tmpjoin(repo->pool, p, ".solv", 0);
+ p = pool_tmpappend(repo->pool, p, ".solv", 0);
q = p + strlen(SOLVCACHE_PATH) + 1;
if (*q == '.')
*q = '_';
@@ -1092,6 +1084,7 @@ writecachedrepo(Repo *repo, Repodata *info, const char *repoext, unsigned char *
cinfo = repo->appdata;
mkdir(SOLVCACHE_PATH, 0755);
+ /* use dupjoin instead of tmpjoin because tmpl must survive repo_write */
tmpl = sat_dupjoin(SOLVCACHE_PATH, "/", ".newsolv-XXXXXX");
fd = mkstemp(tmpl);
if (fd < 0)
@@ -1275,6 +1268,49 @@ repomd_add_ext(Repo *repo, Repodata *data, const char *what)
return 1;
}
+int
+repomd_load_ext(Repo *repo, Repodata *data)
+{
+ const char *filename, *repomdtype;
+ char ext[3];
+ FILE *fp;
+ struct repoinfo *cinfo;
+ const unsigned char *filechksum;
+ Id filechksumtype;
+
+ cinfo = repo->appdata;
+ repomdtype = repodata_lookup_str(data, SOLVID_META, REPOSITORY_REPOMD_TYPE);
+ if (!repomdtype)
+ return 0;
+ if (!strcmp(repomdtype, "filelists"))
+ strcpy(ext, "FL");
+ else if (!strcmp(repomdtype, "deltainfo"))
+ strcpy(ext, "DL");
+ else
+ return 0;
+#if 1
+ printf("[%s:%s", repo->name, ext);
+#endif
+ if (usecachedrepo(repo, ext, cinfo->extcookie, 0))
+ {
+ printf(" cached]\n");fflush(stdout);
+ return 1;
+ }
+ printf(" fetching]\n"); fflush(stdout);
+ filename = repodata_lookup_str(data, SOLVID_META, REPOSITORY_REPOMD_LOCATION);
+ filechksumtype = 0;
+ filechksum = repodata_lookup_bin_checksum(data, SOLVID_META, REPOSITORY_REPOMD_CHECKSUM, &filechksumtype);
+ if ((fp = curlfopen(cinfo, filename, iscompressed(filename), filechksum, filechksumtype, 0)) == 0)
+ return 0;
+ if (!strcmp(ext, "FL"))
+ repo_add_rpmmd(repo, fp, ext, REPO_USE_LOADING|REPO_EXTEND_SOLVABLES);
+ else if (!strcmp(ext, "DL"))
+ repo_add_deltainfoxml(repo, fp, REPO_USE_LOADING);
+ fclose(fp);
+ writecachedrepo(repo, data, ext, cinfo->extcookie);
+ return 1;
+}
+
/* susetags helpers */
@@ -1369,94 +1405,62 @@ susetags_add_ext(Repo *repo, Repodata *data)
dataiterator_free(&di);
}
-
-static inline int
-iscompressed(const char *name)
-{
- int l = strlen(name);
- return l > 3 && !strcmp(name + l - 3, ".gz") ? 1 : 0;
-}
-
-
-/* load callback */
-
int
-load_stub(Pool *pool, Repodata *data, void *dp)
+susetags_load_ext(Repo *repo, Repodata *data)
{
- const char *filename, *descrdir, *repomdtype;
- const unsigned char *filechksum;
- Id filechksumtype;
- struct repoinfo *cinfo;
- FILE *fp;
+ const char *filename, *descrdir;
Id defvendor;
char ext[3];
+ FILE *fp;
+ struct repoinfo *cinfo;
+ const unsigned char *filechksum;
+ Id filechksumtype;
- cinfo = data->repo->appdata;
-
+ cinfo = repo->appdata;
filename = repodata_lookup_str(data, SOLVID_META, SUSETAGS_FILE_NAME);
- if (filename)
- {
- /* susetags load */
- ext[0] = filename[9];
- ext[1] = filename[10];
- ext[2] = 0;
-#if 1
- printf("[%s:%s", data->repo->name, ext);
-#endif
- if (usecachedrepo(data->repo, ext, cinfo->extcookie, 0))
- {
- printf(" cached]\n"); fflush(stdout);
- return 1;
- }
+ if (!filename)
+ return 0;
+ /* susetags load */
+ ext[0] = filename[9];
+ ext[1] = filename[10];
+ ext[2] = 0;
#if 1
- printf(" fetching]\n"); fflush(stdout);
+ printf("[%s:%s", repo->name, ext);
#endif
- defvendor = repo_lookup_id(data->repo, SOLVID_META, SUSETAGS_DEFAULTVENDOR);
- descrdir = repo_lookup_str(data->repo, SOLVID_META, SUSETAGS_DESCRDIR);
- if (!descrdir)
- descrdir = "suse/setup/descr";
- filechksumtype = 0;
- filechksum = repodata_lookup_bin_checksum(data, SOLVID_META, SUSETAGS_FILE_CHECKSUM, &filechksumtype);
- if ((fp = curlfopen(cinfo, pool_tmpjoin(pool, descrdir, "/", filename), iscompressed(filename), filechksum, filechksumtype, 0)) == 0)
- return 0;
- repo_add_susetags(data->repo, fp, defvendor, ext, REPO_USE_LOADING|REPO_EXTEND_SOLVABLES);
- fclose(fp);
- writecachedrepo(data->repo, data, ext, cinfo->extcookie);
+ if (usecachedrepo(repo, ext, cinfo->extcookie, 0))
+ {
+ printf(" cached]\n"); fflush(stdout);
return 1;
}
-
- repomdtype = repodata_lookup_str(data, SOLVID_META, REPOSITORY_REPOMD_TYPE);
- if (repomdtype)
- {
- if (!strcmp(repomdtype, "filelists"))
- strcpy(ext, "FL");
- else if (!strcmp(repomdtype, "deltainfo"))
- strcpy(ext, "DL");
- else
- return 0;
#if 1
- printf("[%s:%s", data->repo->name, ext);
+ printf(" fetching]\n"); fflush(stdout);
#endif
- if (usecachedrepo(data->repo, ext, cinfo->extcookie, 0))
- {
- printf(" cached]\n");fflush(stdout);
- return 1;
- }
- printf(" fetching]\n"); fflush(stdout);
- filename = repodata_lookup_str(data, SOLVID_META, REPOSITORY_REPOMD_LOCATION);
- filechksumtype = 0;
- filechksum = repodata_lookup_bin_checksum(data, SOLVID_META, REPOSITORY_REPOMD_CHECKSUM, &filechksumtype);
- if ((fp = curlfopen(cinfo, filename, iscompressed(filename), filechksum, filechksumtype, 0)) == 0)
- return 0;
- if (!strcmp(ext, "FL"))
- repo_add_rpmmd(data->repo, fp, ext, REPO_USE_LOADING|REPO_EXTEND_SOLVABLES);
- else if (!strcmp(ext, "DL"))
- repo_add_deltainfoxml(data->repo, fp, REPO_USE_LOADING);
- fclose(fp);
- writecachedrepo(data->repo, data, ext, cinfo->extcookie);
- return 1;
- }
+ defvendor = repo_lookup_id(repo, SOLVID_META, SUSETAGS_DEFAULTVENDOR);
+ descrdir = repo_lookup_str(repo, SOLVID_META, SUSETAGS_DESCRDIR);
+ if (!descrdir)
+ descrdir = "suse/setup/descr";
+ filechksumtype = 0;
+ filechksum = repodata_lookup_bin_checksum(data, SOLVID_META, SUSETAGS_FILE_CHECKSUM, &filechksumtype);
+ if ((fp = curlfopen(cinfo, pool_tmpjoin(repo->pool, descrdir, "/", filename), iscompressed(filename), filechksum, filechksumtype, 0)) == 0)
+ return 0;
+ repo_add_susetags(repo, fp, defvendor, ext, REPO_USE_LOADING|REPO_EXTEND_SOLVABLES);
+ fclose(fp);
+ writecachedrepo(repo, data, ext, cinfo->extcookie);
+ return 1;
+}
+
+
+/* load callback */
+
+int
+load_stub(Pool *pool, Repodata *data, void *dp)
+{
+ struct repoinfo *cinfo = data->repo->appdata;
+ if (cinfo->type == TYPE_SUSETAGS)
+ return susetags_load_ext(data->repo, data);
+ if (cinfo->type == TYPE_RPMMD)
+ return repomd_load_ext(data->repo, data);
return 0;
}
@@ -1476,31 +1480,6 @@ repo_add_debdb(Repo *repo, int flags)
fclose(fp);
}
-static int
-hexstr2bytes(unsigned char *buf, const char *str, int buflen)
-{
- int i;
- for (i = 0; i < buflen; i++)
- {
-#define c2h(c) (((c)>='0' && (c)<='9') ? ((c)-'0') \
- : ((c)>='a' && (c)<='f') ? ((c)-('a'-10)) \
- : ((c)>='A' && (c)<='F') ? ((c)-('A'-10)) \
- : -1)
- int v = c2h(*str);
- str++;
- if (v < 0)
- return 0;
- buf[i] = v;
- v = c2h(*str);
- str++;
- if (v < 0)
- return 0;
- buf[i] = (buf[i] << 4) | v;
-#undef c2h
- }
- return buflen;
-}
-
const char *
debian_find_component(struct repoinfo *cinfo, FILE *fp, char *comp, const unsigned char **chksump, Id *chksumtypep)
{
@@ -1588,15 +1567,19 @@ debian_find_component(struct repoinfo *cinfo, FILE *fp, char *comp, const unsign
bp += lbinarydir;
if (!strcmp(bp, "Packages") || !strcmp(bp, "Packages.gz"))
{
+ unsigned char curchksum[32];
+ int curl;
if (filename && !strcmp(bp, "Packages"))
continue;
- if (chksumtype && sat_chksum_len(chksumtype) > sat_chksum_len(curchksumtype))
+ curl = sat_chksum_len(curchksumtype);
+ if (!curl || (chksumtype && sat_chksum_len(chksumtype) > curl))
continue;
- if (!hexstr2bytes(chksum, ch, sat_chksum_len(curchksumtype)))
+ if (sat_hex2bin(&ch, curchksum, sizeof(curchksum)) != curl)
continue;
sat_free(filename);
filename = strdup(fn);
chksumtype = curchksumtype;
+ memcpy(chksum, curchksum, curl);
}
}
free(binarydir);
@@ -3168,7 +3151,7 @@ rerunsolver:
seqevr = pool_lookup_str(pool, SOLVID_POS, DELTA_SEQ_EVR);
seqnum = pool_lookup_str(pool, SOLVID_POS, DELTA_SEQ_NUM);
seq = pool_tmpjoin(pool, seqname, "-", seqevr);
- seq = pool_tmpjoin(pool, seq, "-", seqnum);
+ seq = pool_tmpappend(pool, seq, "-", seqnum);
#ifdef FEDORA
sprintf(cmd, "/usr/bin/applydeltarpm -a %s -c -s ", id2str(pool, s->arch));
#else
@@ -3182,9 +3165,9 @@ rerunsolver:
if (!chksumtype)
continue; /* no way! */
dloc = pool_lookup_str(pool, SOLVID_POS, DELTA_LOCATION_DIR);
- dloc = pool_tmpjoin(pool, dloc, "/", pool_lookup_str(pool, SOLVID_POS, DELTA_LOCATION_NAME));
- dloc = pool_tmpjoin(pool, dloc, "-", pool_lookup_str(pool, SOLVID_POS, DELTA_LOCATION_EVR));
- dloc = pool_tmpjoin(pool, dloc, ".", pool_lookup_str(pool, SOLVID_POS, DELTA_LOCATION_SUFFIX));
+ dloc = pool_tmpappend(pool, dloc, "/", pool_lookup_str(pool, SOLVID_POS, DELTA_LOCATION_NAME));
+ dloc = pool_tmpappend(pool, dloc, "-", pool_lookup_str(pool, SOLVID_POS, DELTA_LOCATION_EVR));
+ dloc = pool_tmpappend(pool, dloc, ".", pool_lookup_str(pool, SOLVID_POS, DELTA_LOCATION_SUFFIX));
if ((fp = curlfopen(cinfo, dloc, 0, chksum, chksumtype, 0)) == 0)
continue;
/* got it, now reconstruct */
@@ -3306,7 +3289,7 @@ rerunsolver:
if (evrp > evr && evrp[0] == ':' && evrp[1])
evr = evrp + 1;
nvra = pool_tmpjoin(pool, id2str(pool, s->name), "-", evr);
- nvra = pool_tmpjoin(pool, nvra, ".", id2str(pool, s->arch));
+ nvra = pool_tmpappend(pool, nvra, ".", id2str(pool, s->arch));
runrpm("-e", nvra, -1); /* too bad that --querybynumber doesn't work */
#else
rundpkg("--remove", id2str(pool, s->name), 0);
diff --git a/examples/solv.i b/examples/solv.i
index cd96626..7924386 100644
--- a/examples/solv.i
+++ b/examples/solv.i
@@ -55,6 +55,8 @@
#include "stdio.h"
#include "pool.h"
#include "solver.h"
+#include "policy.h"
+#include "solverdebug.h"
#include "repo_solv.h"
#include "chksum.h"
@@ -309,12 +311,54 @@ typedef struct {
job->what = what;
return job;
}
+
+ const char *str() {
+ Pool *pool = $self->pool;
+ Id select = $self->how & SOLVER_SELECTMASK;
+ char *strstart = 0, *strend = 0;
+ switch ($self->how & SOLVER_JOBMASK) {
+ case SOLVER_INSTALL:
+ if (select == SOLVER_SOLVABLE && pool->installed && pool->solvables[$self->what].repo == pool->installed)
+ strstart = "keep ", strend = "installed";
+ else if (select == SOLVER_SOLVABLE_PROVIDES)
+ strstart = "install a solvable ";
+ else
+ strstart = "install ";
+ break;
+ case SOLVER_ERASE:
+ if (select == SOLVER_SOLVABLE && !(pool->installed && pool->solvables[$self->what].repo == pool->installed))
+ strstart = "keep ", strend = "uninstalled";
+ else if (select == SOLVER_SOLVABLE_PROVIDES)
+ strstart = "deinstall all solvables ";
+ else
+ strstart = "deinstall ";
+ break;
+ case SOLVER_UPDATE:
+ strstart = "install the most recent version of ";
+ break;
+ case SOLVER_LOCK:
+ strstart = "lock ";
+ break;
+ default:
+ return "unknwon job";
+ }
+ return pool_tmpjoin(pool, strstart, solver_select2str(pool, select, $self->what), strend);
+ }
}
%extend Chksum {
Chksum(Id type) {
return (Chksum *)sat_chksum_create(type);
}
+ Chksum(Id type, const char *hex) {
+ unsigned char buf[64];
+ int l = sat_chksum_len(type);
+ if (!l)
+ return 0;
+ if (sat_hex2bin(&hex, buf, sizeof(buf)) != l || hex[0])
+ return 0;
+ return (Chksum *)sat_chksum_create_from_bin(type, buf);
+ }
~Chksum() {
sat_chksum_free($self, 0);
}
@@ -341,6 +385,15 @@ typedef struct {
sat_chksum_add($self, buf, l);
lseek(fd, 0, 0); /* convenience */
}
+ void add_stat(const char *filename) {
+ struct stat stb;
+ if (stat(filename, &stb))
+ memset(&stb, 0, sizeof(stb));
+ sat_chksum_add($self, &stb.st_dev, sizeof(stb.st_dev));
+ sat_chksum_add($self, &stb.st_ino, sizeof(stb.st_ino));
+ sat_chksum_add($self, &stb.st_size, sizeof(stb.st_size));
+ sat_chksum_add($self, &stb.st_mtime, sizeof(stb.st_mtime));
+ }
bool matches(Chksum *othersum) {
int l;
const unsigned char *b, *bo;
@@ -360,20 +413,13 @@ typedef struct {
}
%newobject hex;
char *hex() {
- int i, l, c;
+ int l;
const unsigned char *b;
char *ret, *rp;
b = sat_chksum_get($self, &l);
- ret = rp = sat_malloc(2 * l + 1);
- for (i = 0; i < l; i++)
- {
- c = b[i] >> 4;
- *rp++ = c < 10 ? c + '0' : c + ('a' - 10);
- c = b[i] & 15;
- *rp++ = c < 10 ? c + '0' : c + ('a' - 10);
- }
- *rp++ = 0;
+ ret = sat_malloc(2 * l + 1);
+ sat_bin2hex(b, l, ret);
return ret;
}
}
@@ -620,11 +666,7 @@ typedef struct {
return 1;
}
Id add_rpm(const char *name, int flags = 0) {
- int oldend = $self->end;
- repo_add_rpms($self, &name, 1, flags);
- if (oldend == $self->end)
- return 0;
- return $self->end - 1;
+ return repo_add_rpm($self, name, flags);
}
bool add_susetags(FILE *fp, Id defvendor, const char *language, int flags = 0) {
repo_add_susetags($self, fp, defvendor, language, flags);
@@ -1015,6 +1057,19 @@ typedef struct {
return xs->pool->solvables[xs->id].arch;
}
%}
+ const char * const vendor;
+ %{
+ SWIGINTERN const char *XSolvable_vendor_get(XSolvable *xs) {
+ Pool *pool = xs->pool;
+ return id2str(pool, pool->solvables[xs->id].vendor);
+ }
+ %}
+ Id const vendorid;
+ %{
+ SWIGINTERN Id XSolvable_vendorid_get(XSolvable *xs) {
+ return xs->pool->solvables[xs->id].vendor;
+ }
+ %}
Repo * const repo;
%{
SWIGINTERN Repo *XSolvable_repo_get(XSolvable *xs) {
@@ -1108,6 +1163,11 @@ typedef struct {
}
return e;
}
+ int illegalreplace() {
+ if ($self->type != SOLVER_SOLUTION_REPLACE || $self->p <= 0 || $self->rp <= 0)
+ return 0;
+ return policy_is_illegal($self->solv, $self->solv->pool->solvables + $self->p, $self->solv->pool->solvables + $self->rp, 0);
+ }
%newobject solvable;
XSolvable * const solvable;
%newobject replacement;
@@ -1153,6 +1213,10 @@ typedef struct {
static const int SOLVER_SOLUTION_DEINSTALL = SOLVER_SOLUTION_DEINSTALL;
static const int SOLVER_SOLUTION_REPLACE = SOLVER_SOLUTION_REPLACE;
+ static const int POLICY_ILLEGAL_DOWNGRADE = POLICY_ILLEGAL_DOWNGRADE;
+ static const int POLICY_ILLEGAL_ARCHCHANGE = POLICY_ILLEGAL_ARCHCHANGE;
+ static const int POLICY_ILLEGAL_VENDORCHANGE = POLICY_ILLEGAL_VENDORCHANGE;
+
~Solver() {
solver_free($self);
}