Diffstat (limited to 'src')
-rw-r--r--  src/libsolv.ver    2
-rw-r--r--  src/linkedpkg.c  122
-rw-r--r--  src/linkedpkg.h    1
-rw-r--r--  src/policy.c      17
-rw-r--r--  src/pool.c         2
-rw-r--r--  src/poolarch.c     4
-rw-r--r--  src/repodata.c   275
-rw-r--r--  src/repodata.h     1
-rw-r--r--  src/rules.c      150
-rw-r--r--  src/solver.c      61
-rw-r--r--  src/util.c        24
-rw-r--r--  src/util.h         9
12 files changed, 494 insertions, 174 deletions
diff --git a/src/libsolv.ver b/src/libsolv.ver
index 9e47117..6508288 100644
--- a/src/libsolv.ver
+++ b/src/libsolv.ver
@@ -202,6 +202,7 @@ SOLV_1.0 {
repodata_localize_id;
repodata_lookup_bin_checksum;
repodata_lookup_binary;
+ repodata_lookup_dirstrarray_uninternalized;
repodata_lookup_id;
repodata_lookup_id_uninternalized;
repodata_lookup_idarray;
@@ -262,6 +263,7 @@ SOLV_1.0 {
solv_depmarker;
solv_dupappend;
solv_dupjoin;
+ solv_extend_realloc;
solv_free;
solv_hex2bin;
solv_latin1toutf8;
diff --git a/src/linkedpkg.c b/src/linkedpkg.c
index c5adc9a..6387373 100644
--- a/src/linkedpkg.c
+++ b/src/linkedpkg.c
@@ -21,7 +21,7 @@
*
* product:
* created from product data in the repository (which is generated from files
- * in /etc/products.d. In the future we may switch to using product()
+ * in /etc/products.d). In the future we may switch to using product()
* provides of packages.
*
* pattern:
@@ -37,6 +37,7 @@
#include "pool.h"
#include "repo.h"
+#include "evr.h"
#include "linkedpkg.h"
#ifdef ENABLE_LINKED_PKGS
@@ -47,12 +48,11 @@ find_application_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp
Id req = 0;
Id prv = 0;
Id p, pp;
- Id pkgname = 0;
+ Id pkgname = 0, appdataid = 0;
/* find appdata requires */
if (s->requires)
{
- Id appdataid = 0;
Id *reqp = s->repo->idarraydata + s->requires;
while ((req = *reqp++) != 0) /* go through all requires */
{
@@ -63,22 +63,34 @@ find_application_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp
else
pkgname = req;
}
- req = appdataid;
}
+ req = appdataid ? appdataid : pkgname;
if (!req)
return;
/* find application-appdata provides */
if (s->provides)
{
Id *prvp = s->repo->idarraydata + s->provides;
+ const char *reqs = pool_id2str(pool, req);
+ const char *prvs;
while ((prv = *prvp++) != 0) /* go through all provides */
{
if (ISRELDEP(prv))
continue;
- if (strncmp("application-appdata(", pool_id2str(pool, prv), 20))
+ prvs = pool_id2str(pool, prv);
+ if (strncmp("application-appdata(", prvs, 20))
continue;
- if (!strcmp(pool_id2str(pool, prv) + 12, pool_id2str(pool, req)))
- break;
+ if (appdataid)
+ {
+ if (!strcmp(prvs + 12, reqs))
+ break;
+ }
+ else
+ {
+ int reqsl = strlen(reqs);
+ if (!strncmp(prvs + 20, reqs, reqsl) && !strcmp(prvs + 20 + reqsl, ")"))
+ break;
+ }
}
}
if (!prv)
@@ -88,7 +100,7 @@ find_application_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp
if (pool->solvables[p].repo == s->repo)
if (!pkgname || pool->solvables[p].name == pkgname)
queue_push(qr, p);
- if (!qr->count && pkgname)
+ if (!qr->count && pkgname && appdataid)
{
/* huh, no matching package? try without pkgname filter */
FOR_PROVIDES(p, pp, req)
@@ -112,6 +124,7 @@ find_product_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu
{
Id p, pp, namerelid;
char *str;
+ unsigned int sbt = 0;
/* search for project requires */
namerelid = 0;
@@ -149,6 +162,29 @@ find_product_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu
continue;
queue_push(qr, p);
}
+ if (qr->count > 1)
+ {
+ /* multiple providers. try buildtime filter */
+ sbt = solvable_lookup_num(s, SOLVABLE_BUILDTIME, 0);
+ if (sbt)
+ {
+ unsigned int bt;
+ int i, j;
+ int filterqp = 1;
+ for (i = j = 0; i < qr->count; i++)
+ {
+ bt = solvable_lookup_num(pool->solvables + qr->elements[i], SOLVABLE_BUILDTIME, 0);
+ if (!bt)
+ filterqp = 0; /* can't filter */
+ if (!bt || bt == sbt)
+ qr->elements[j++] = qr->elements[i];
+ }
+ if (j)
+ qr->count = j;
+ if (!j || !filterqp)
+ sbt = 0; /* filter failed */
+ }
+ }
if (!qr->count && s->repo == pool->installed)
{
/* oh no! Look up reference file */
@@ -174,6 +210,8 @@ find_product_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu
Solvable *ps = pool->solvables + p;
if (s->name != ps->name || ps->repo != s->repo || ps->arch != s->arch || s->evr != ps->evr)
continue;
+ if (sbt && solvable_lookup_num(ps, SOLVABLE_BUILDTIME, 0) != sbt)
+ continue;
queue_push(qp, p);
}
}
@@ -272,4 +310,72 @@ find_package_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu
find_product_link(pool, s, reqidp, qr, prvidp, qp);
}
+static int
+name_min_max(Pool *pool, Solvable *s, Id *namep, Id *minp, Id *maxp)
+{
+ Queue q;
+ Id qbuf[4];
+ Id name, min, max;
+ int i;
+
+ queue_init_buffer(&q, qbuf, sizeof(qbuf)/sizeof(*qbuf));
+ find_package_link(pool, s, 0, &q, 0, 0);
+ if (!q.count)
+ {
+ queue_free(&q);
+ return 0;
+ }
+ s = pool->solvables + q.elements[0];
+ name = s->name;
+ min = max = s->evr;
+ for (i = 1; i < q.count; i++)
+ {
+ s = pool->solvables + q.elements[i];
+ if (s->name != name)
+ {
+ queue_free(&q);
+ return 0;
+ }
+ if (s->evr == min || s->evr == max)
+ continue;
+ if (pool_evrcmp(pool, min, s->evr, EVRCMP_COMPARE) >= 0)
+ min = s->evr;
+ else if (min == max || pool_evrcmp(pool, max, s->evr, EVRCMP_COMPARE) <= 0)
+ max = s->evr;
+ }
+ queue_free(&q);
+ *namep = name;
+ *minp = min;
+ *maxp = max;
+ return 1;
+}
+
+int
+pool_link_evrcmp(Pool *pool, Solvable *s1, Solvable *s2)
+{
+ Id name1, evrmin1, evrmax1;
+ Id name2, evrmin2, evrmax2;
+
+ if (s1->name != s2->name)
+ return 0; /* can't compare */
+ if (!name_min_max(pool, s1, &name1, &evrmin1, &evrmax1))
+ return 0;
+ if (!name_min_max(pool, s2, &name2, &evrmin2, &evrmax2))
+ return 0;
+ /* compare linked names */
+ if (name1 != name2)
+ return 0;
+ if (evrmin1 == evrmin2 && evrmax1 == evrmax2)
+ return 0;
+ /* now compare evr intervals */
+ if (evrmin1 == evrmax1 && evrmin2 == evrmax2)
+ return pool_evrcmp(pool, evrmin1, evrmax2, EVRCMP_COMPARE);
+ if (evrmin1 != evrmax2 && pool_evrcmp(pool, evrmin1, evrmax2, EVRCMP_COMPARE) > 0)
+ return 1;
+ if (evrmax1 != evrmin2 && pool_evrcmp(pool, evrmax1, evrmin2, EVRCMP_COMPARE) < 0)
+ return -1;
+ return 0;
+}
+
+
#endif
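
The new pool_link_evrcmp() only orders two linked pseudo-packages when the evr intervals spanned by their linked providers do not overlap. For illustration, a standalone sketch of that interval rule (not part of this commit), with plain ints standing in for evr Ids and cmp() standing in for pool_evrcmp(); interval_cmp() and the sample values are my own:

/* standalone sketch: the interval rule behind pool_link_evrcmp() */
#include <stdio.h>

static int cmp(int a, int b)
{
  return (a > b) - (a < b);          /* toy stand-in for pool_evrcmp() */
}

/* >0: range 1 is clearly newer, <0: clearly older, 0: no decision */
static int interval_cmp(int min1, int max1, int min2, int max2)
{
  if (min1 == min2 && max1 == max2)
    return 0;                        /* identical ranges: nothing to prefer */
  if (min1 == max1 && min2 == max2)
    return cmp(min1, max2);          /* both single versions: plain compare */
  if (min1 != max2 && cmp(min1, max2) > 0)
    return 1;                        /* range 1 lies entirely above range 2 */
  if (max1 != min2 && cmp(max1, min2) < 0)
    return -1;                       /* range 1 lies entirely below range 2 */
  return 0;                          /* overlapping ranges: stay undecided */
}

int main(void)
{
  printf("%d %d\n",
         interval_cmp(3, 4, 1, 2),   /* 1: [3,4] beats [1,2] */
         interval_cmp(1, 3, 2, 4));  /* 0: [1,3] and [2,4] overlap */
  return 0;
}

prune_to_best_version() in the policy.c hunk below uses this result only as a tie-breaker when the pseudo-packages themselves compare equal.
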
diff --git a/src/linkedpkg.h b/src/linkedpkg.h
index 25894c9..4463280 100644
--- a/src/linkedpkg.h
+++ b/src/linkedpkg.h
@@ -34,5 +34,6 @@ extern Id find_autoproduct_name(Pool *pool, Solvable *s);
/* generic */
extern void find_package_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Queue *qp);
+extern int pool_link_evrcmp(Pool *pool, Solvable *s1, Solvable *s2);
#endif
diff --git a/src/policy.c b/src/policy.c
index fadfcda..12ad771 100644
--- a/src/policy.c
+++ b/src/policy.c
@@ -21,9 +21,11 @@
#include "policy.h"
#include "poolvendor.h"
#include "poolarch.h"
+#include "linkedpkg.h"
#include "cplxdeps.h"
+
/*-----------------------------------------------------------------*/
/*
@@ -825,7 +827,7 @@ move_installed_to_front(Pool *pool, Queue *plist)
void
prune_to_best_version(Pool *pool, Queue *plist)
{
- int i, j;
+ int i, j, r;
Solvable *s, *best;
if (plist->count < 2) /* no need to prune for a single entry */
@@ -858,12 +860,13 @@ prune_to_best_version(Pool *pool, Queue *plist)
best = s; /* take current as new best */
continue;
}
-
- if (best->evr != s->evr) /* compare evr */
- {
- if (pool_evrcmp(pool, best->evr, s->evr, EVRCMP_COMPARE) < 0)
- best = s;
- }
+ r = best->evr != s->evr ? pool_evrcmp(pool, best->evr, s->evr, EVRCMP_COMPARE) : 0;
+#ifdef ENABLE_LINKED_PKGS
+ if (r == 0 && has_package_link(pool, s))
+ r = pool_link_evrcmp(pool, best, s);
+#endif
+ if (r < 0)
+ best = s;
}
plist->elements[j++] = best - pool->solvables; /* finish last group */
plist->count = j;
diff --git a/src/pool.c b/src/pool.c
index 33293b5..85932bf 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -82,7 +82,7 @@ pool_create(void)
s->evr = ID_EMPTY;
pool->debugmask = SOLV_DEBUG_RESULT; /* FIXME */
-#ifdef FEDORA
+#if defined(FEDORA) || defined(MAGEIA)
pool->implicitobsoleteusescolors = 1;
#endif
#ifdef RPM5
diff --git a/src/poolarch.c b/src/poolarch.c
index 9408983..788646b 100644
--- a/src/poolarch.c
+++ b/src/poolarch.c
@@ -21,7 +21,7 @@
#include "util.h"
static const char *archpolicies[] = {
-#ifdef FEDORA
+#if defined(FEDORA) || defined(MAGEIA)
"x86_64", "x86_64:athlon:i686:i586:i486:i386",
#else
"x86_64", "x86_64:i686:i586:i486:i386",
@@ -64,7 +64,7 @@ static const char *archpolicies[] = {
"mips64", "mips64",
"mips64el", "mips64el",
"m68k", "m68k",
-#ifdef FEDORA
+#if defined(FEDORA) || defined(MAGEIA)
"ia32e", "ia32e:x86_64:athlon:i686:i586:i486:i386",
"athlon", "athlon:i686:i586:i486:i386",
"amd64", "amd64:x86_64:athlon:i686:i586:i486:i386",
diff --git a/src/repodata.c b/src/repodata.c
index c854262..ad3e71a 100644
--- a/src/repodata.c
+++ b/src/repodata.c
@@ -849,6 +849,37 @@ repodata_lookup_id_uninternalized(Repodata *data, Id solvid, Id keyname, Id void
return 0;
}
+const char *
+repodata_lookup_dirstrarray_uninternalized(Repodata *data, Id solvid, Id keyname, Id *didp, Id *iterp)
+{
+ Id *ap, did;
+ Id iter = *iterp;
+ if (iter == 0) /* find key data */
+ {
+ if (!data->attrs)
+ return 0;
+ ap = data->attrs[solvid - data->start];
+ if (!ap)
+ return 0;
+ for (; *ap; ap += 2)
+ if (data->keys[*ap].name == keyname && data->keys[*ap].type == REPOKEY_TYPE_DIRSTRARRAY)
+ break;
+ if (!*ap)
+ return 0;
+ iter = ap[1];
+ }
+ did = *didp;
+ for (ap = data->attriddata + iter; *ap; ap += 2)
+ {
+ if (did && ap[0] != did)
+ continue;
+ *didp = ap[0];
+ *iterp = ap - data->attriddata + 2;
+ return (const char *)data->attrdata + ap[1];
+ }
+ *iterp = 0;
+ return 0;
+}
/************************************************************************
* data search
@@ -3024,6 +3055,7 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore,
case REPOKEY_TYPE_VOID:
case REPOKEY_TYPE_CONSTANT:
case REPOKEY_TYPE_CONSTANTID:
+ case REPOKEY_TYPE_DELETED:
break;
case REPOKEY_TYPE_STR:
data_addblob(xd, data->attrdata + val, strlen((char *)(data->attrdata + val)) + 1);
@@ -3094,29 +3126,30 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore,
sp = schema;
kp = data->xattrs[-*ida];
if (!kp)
- continue;
+ continue; /* ignore empty elements */
num++;
- for (;*kp; kp += 2)
+ for (; *kp; kp += 2)
*sp++ = *kp;
*sp = 0;
if (!schemaid)
schemaid = repodata_schema2id(data, schema, 1);
else if (schemaid != repodata_schema2id(data, schema, 0))
{
- pool_debug(data->repo->pool, SOLV_FATAL, "fixarray substructs with different schemas\n");
- exit(1);
+ pool_debug(data->repo->pool, SOLV_ERROR, "repodata_serialize_key: fixarray substructs with different schemas\n");
+ num = 0;
+ break;
}
}
+ data_addid(xd, num);
if (!num)
break;
- data_addid(xd, num);
data_addid(xd, schemaid);
for (ida = data->attriddata + val; *ida; ida++)
{
Id *kp = data->xattrs[-*ida];
if (!kp)
continue;
- for (;*kp; kp += 2)
+ for (; *kp; kp += 2)
repodata_serialize_key(data, newincore, newvincore, schema, data->keys + *kp, kp[1]);
}
break;
@@ -3148,7 +3181,7 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore,
break;
}
default:
- pool_debug(data->repo->pool, SOLV_FATAL, "don't know how to handle type %d\n", key->type);
+ pool_debug(data->repo->pool, SOLV_FATAL, "repodata_serialize_key: don't know how to handle type %d\n", key->type);
exit(1);
}
if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
@@ -3160,18 +3193,57 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore,
}
}
+/* create a circular linked list of all keys that share
+ * the same keyname */
+static Id *
+calculate_keylink(Repodata *data)
+{
+ int i, j;
+ Id *link;
+ Id maxkeyname = 0, *keytable = 0;
+ link = solv_calloc(data->nkeys, sizeof(Id));
+ if (data->nkeys <= 2)
+ return link;
+ for (i = 1; i < data->nkeys; i++)
+ {
+ Id n = data->keys[i].name;
+ if (n >= maxkeyname)
+ {
+ keytable = solv_realloc2(keytable, n + 128, sizeof(Id));
+ memset(keytable + maxkeyname, 0, (n + 128 - maxkeyname) * sizeof(Id));
+ maxkeyname = n + 128;
+ }
+ j = keytable[n];
+ if (j)
+ link[i] = link[j];
+ else
+ j = i;
+ link[j] = i;
+ keytable[n] = i;
+ }
+ /* remove links that just point to themselves */
+ for (i = 1; i < data->nkeys; i++)
+ if (link[i] == i)
+ link[i] = 0;
+ solv_free(keytable);
+ return link;
+}
+
void
repodata_internalize(Repodata *data)
{
Repokey *key, solvkey;
Id entry, nentry;
- Id schemaid, keyid, *schema, *sp, oldschema, *keyp, *seen;
+ Id schemaid, keyid, *schema, *sp, oldschemaid, *keyp, *seen;
+ Offset *oldincoreoffs = 0;
int schemaidx;
unsigned char *dp, *ndp;
- int newschema, oldcount;
+ int neednewschema;
struct extdata newincore;
struct extdata newvincore;
Id solvkeyid;
+ Id *keylink;
+ int haveoldkl;
if (!data->attrs && !data->xattrs)
return;
@@ -3204,140 +3276,181 @@ repodata_internalize(Repodata *data)
data->mainschema = 0;
data->mainschemaoffsets = solv_free(data->mainschemaoffsets);
+ keylink = calculate_keylink(data);
/* join entry data */
/* we start with the meta data, entry -1 */
for (entry = -1; entry < nentry; entry++)
{
- memset(seen, 0, data->nkeys * sizeof(Id));
- oldschema = 0;
+ oldschemaid = 0;
dp = data->incoredata;
if (dp)
{
dp += entry >= 0 ? data->incoreoffset[entry] : 1;
- dp = data_read_id(dp, &oldschema);
+ dp = data_read_id(dp, &oldschemaid);
}
+ memset(seen, 0, data->nkeys * sizeof(Id));
#if 0
-fprintf(stderr, "oldschema %d\n", oldschema);
-fprintf(stderr, "schemata %d\n", data->schemata[oldschema]);
+fprintf(stderr, "oldschemaid %d\n", oldschemaid);
+fprintf(stderr, "schemata %d\n", data->schemata[oldschemaid]);
fprintf(stderr, "schemadata %p\n", data->schemadata);
#endif
- /* seen: -1: old data 0: skipped >0: id + 1 */
- newschema = 0;
- oldcount = 0;
+
+ /* seen: -1: old data, 0: skipped, >0: id + 1 */
+ neednewschema = 0;
sp = schema;
- for (keyp = data->schemadata + data->schemata[oldschema]; *keyp; keyp++)
+ haveoldkl = 0;
+ for (keyp = data->schemadata + data->schemata[oldschemaid]; *keyp; keyp++)
{
if (seen[*keyp])
{
- pool_debug(data->repo->pool, SOLV_FATAL, "Inconsistent old data (key occured twice).\n");
- exit(1);
+ /* oops, should not happen */
+ neednewschema = 1;
+ continue;
}
- seen[*keyp] = -1;
+ seen[*keyp] = -1; /* use old marker */
*sp++ = *keyp;
- oldcount++;
+ if (keylink[*keyp])
+ haveoldkl = 1; /* potential keylink conflict */
}
- if (entry >= 0)
- keyp = data->attrs ? data->attrs[entry] : 0;
- else
+
+ /* strip solvables key */
+ if (entry < 0 && solvkeyid && seen[solvkeyid])
{
- /* strip solvables key */
*sp = 0;
for (sp = keyp = schema; *sp; sp++)
if (*sp != solvkeyid)
*keyp++ = *sp;
- else
- oldcount--;
sp = keyp;
seen[solvkeyid] = 0;
- keyp = data->xattrs ? data->xattrs[1] : 0;
+ neednewschema = 1;
}
+
+ /* add new entries */
+ if (entry >= 0)
+ keyp = data->attrs ? data->attrs[entry] : 0;
+ else
+ keyp = data->xattrs ? data->xattrs[1] : 0;
if (keyp)
for (; *keyp; keyp += 2)
{
if (!seen[*keyp])
{
- newschema = 1;
+ neednewschema = 1;
*sp++ = *keyp;
+ if (haveoldkl && keylink[*keyp]) /* this should be pretty rare */
+ {
+ Id kl;
+ for (kl = keylink[*keyp]; kl != *keyp; kl = keylink[kl])
+ if (seen[kl] == -1)
+ {
+ /* replacing old key kl, remove from schema and seen */
+ Id *osp;
+ for (osp = schema; osp < sp; osp++)
+ if (*osp == kl)
+ {
+ memmove(osp, osp + 1, (sp - osp) * sizeof(Id));
+ sp--;
+ seen[kl] = 0;
+ break;
+ }
+ }
+ }
}
seen[*keyp] = keyp[1] + 1;
}
+
+ /* add solvables key if needed */
if (entry < 0 && data->end != data->start)
{
- *sp++ = solvkeyid;
- newschema = 1;
+ *sp++ = solvkeyid; /* always last in schema */
+ neednewschema = 1;
}
+
+ /* commit schema */
*sp = 0;
- if (newschema)
+ if (neednewschema)
/* Ideally we'd like to sort the new schema here, to ensure
- schema equality independend of the ordering. We can't do that
- yet. For once see below (old ids need to come before new ids).
- An additional difficulty is that we also need to move
- the values with the keys. */
+ schema equality independent of the ordering. */
schemaid = repodata_schema2id(data, schema, 1);
else
- schemaid = oldschema;
+ schemaid = oldschemaid;
+
+ if (entry < 0)
+ {
+ data->mainschemaoffsets = solv_calloc(sp - schema, sizeof(Id));
+ data->mainschema = schemaid;
+ }
+
+ /* find offsets in old incore data */
+ if (oldschemaid)
+ {
+ Id *lastneeded = 0;
+ for (sp = data->schemadata + data->schemata[oldschemaid]; *sp; sp++)
+ if (seen[*sp] == -1)
+ lastneeded = sp + 1;
+ if (lastneeded)
+ {
+ if (!oldincoreoffs)
+ oldincoreoffs = solv_malloc2(data->nkeys, 2 * sizeof(Offset));
+ for (sp = data->schemadata + data->schemata[oldschemaid]; sp != lastneeded; sp++)
+ {
+ /* Skip the data associated with this old key. */
+ key = data->keys + *sp;
+ ndp = dp;
+ if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ ndp = data_skip(ndp, REPOKEY_TYPE_ID);
+ ndp = data_skip(ndp, REPOKEY_TYPE_ID);
+ }
+ else if (key->storage == KEY_STORAGE_INCORE)
+ ndp = data_skip_key(data, ndp, key);
+ oldincoreoffs[*sp * 2] = dp - data->incoredata;
+ oldincoreoffs[*sp * 2 + 1] = ndp - dp;
+ dp = ndp;
+ }
+ }
+ }
+ /* just copy over the complete old entry (including the schemaid) if there was no new data */
+ if (entry >= 0 && !neednewschema && oldschemaid && (!data->attrs || !data->attrs[entry]) && dp)
+ {
+ ndp = data->incoredata + data->incoreoffset[entry];
+ data->incoreoffset[entry] = newincore.len;
+ data_addblob(&newincore, ndp, dp - ndp);
+ goto entrydone;
+ }
/* Now create data blob. We walk through the (possibly new) schema
and either copy over old data, or insert the new. */
- /* XXX Here we rely on the fact that the (new) schema has the form
- o1 o2 o3 o4 ... | n1 n2 n3 ...
- (oX being the old keyids (possibly overwritten), and nX being
- the new keyids). This rules out sorting the keyids in order
- to ensure a small schema count. */
if (entry >= 0)
data->incoreoffset[entry] = newincore.len;
data_addid(&newincore, schemaid);
- if (entry == -1)
- {
- data->mainschema = schemaid;
- data->mainschemaoffsets = solv_calloc(sp - schema, sizeof(Id));
- }
+
/* we don't use a pointer to the schemadata here as repodata_serialize_key
* may call repodata_schema2id() which might realloc our schemadata */
for (schemaidx = data->schemata[schemaid]; (keyid = data->schemadata[schemaidx]) != 0; schemaidx++)
{
- if (entry == -1)
- data->mainschemaoffsets[schemaidx - data->schemata[schemaid]] = newincore.len;
- if (keyid == solvkeyid)
+ if (entry < 0)
{
- /* add flexarray entry count */
- data_addid(&newincore, data->end - data->start);
- break;
- }
- key = data->keys + keyid;
-#if 0
- fprintf(stderr, "internalize %d(%d):%s:%s\n", entry, entry + data->start, pool_id2str(data->repo->pool, key->name), pool_id2str(data->repo->pool, key->type));
-#endif
- ndp = dp;
- if (oldcount)
- {
- /* Skip the data associated with this old key. */
- if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
+ data->mainschemaoffsets[schemaidx - data->schemata[schemaid]] = newincore.len;
+ if (keyid == solvkeyid)
{
- ndp = data_skip(dp, REPOKEY_TYPE_ID);
- ndp = data_skip(ndp, REPOKEY_TYPE_ID);
+ /* add flexarray entry count */
+ data_addid(&newincore, data->end - data->start);
+ break; /* always the last entry */
}
- else if (key->storage == KEY_STORAGE_INCORE)
- ndp = data_skip_key(data, dp, key);
- oldcount--;
}
if (seen[keyid] == -1)
{
- /* If this key was an old one _and_ was not overwritten with
- a different value copy over the old value (we skipped it
- above). */
- if (dp != ndp)
- data_addblob(&newincore, dp, ndp - dp);
- seen[keyid] = 0;
+ if (oldincoreoffs[keyid * 2 + 1])
+ data_addblob(&newincore, data->incoredata + oldincoreoffs[keyid * 2], oldincoreoffs[keyid * 2 + 1]);
}
else if (seen[keyid])
- {
- /* Otherwise we have a new value. Parse it into the internal form. */
- repodata_serialize_key(data, &newincore, &newvincore, schema, key, seen[keyid] - 1);
- }
- dp = ndp;
+ repodata_serialize_key(data, &newincore, &newvincore, schema, data->keys + keyid, seen[keyid] - 1);
}
+
+entrydone:
+ /* free memory */
if (entry >= 0 && data->attrs)
{
if (data->attrs[entry])
@@ -3367,6 +3480,8 @@ fprintf(stderr, "schemadata %p\n", data->schemadata);
data->lastdatalen = 0;
solv_free(schema);
solv_free(seen);
+ solv_free(keylink);
+ solv_free(oldincoreoffs);
repodata_free_schemahash(data);
solv_free(data->incoredata);
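
calculate_keylink() above threads all keys that share a keyname into one circular list so repodata_internalize() can cheaply find the old key that a new key of the same name replaces. A standalone toy version of that list construction (my own array and name values, not libsolv code):

/* standalone sketch of the circular key-link construction */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  int names[] = { 0, 5, 7, 5, 9, 7, 5 };   /* index 0 unused, like key 0 */
  int n = sizeof(names) / sizeof(*names);
  int *link = calloc(n, sizeof(int));
  int *last = calloc(16, sizeof(int));     /* last index seen per name id */
  int i;

  for (i = 1; i < n; i++)
    {
      int j = last[names[i]];
      if (j)
        link[i] = link[j];    /* splice into the existing cycle */
      else
        j = i;                /* first occurrence starts its own cycle */
      link[j] = i;
      last[names[i]] = i;
    }
  for (i = 1; i < n; i++)
    if (link[i] == i)
      link[i] = 0;            /* drop one-element cycles */
  for (i = 1; i < n; i++)
    printf("key %d (name %d) -> link %d\n", i, names[i], link[i]);
  free(link);
  free(last);
  return 0;
}

Following link[] from any key of a given name visits every other key with that name and returns to the start; keys left at 0 have a unique name and need no conflict handling.
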
diff --git a/src/repodata.h b/src/repodata.h
index ad05525..c18c688 100644
--- a/src/repodata.h
+++ b/src/repodata.h
@@ -301,6 +301,7 @@ void repodata_set_location(Repodata *data, Id solvid, int medianr, const char *d
void repodata_set_deltalocation(Repodata *data, Id handle, int medianr, const char *dir, const char *file);
void repodata_set_sourcepkg(Repodata *data, Id solvid, const char *sourcepkg);
Id repodata_lookup_id_uninternalized(Repodata *data, Id solvid, Id keyname, Id voidid);
+const char *repodata_lookup_dirstrarray_uninternalized(Repodata *data, Id solvid, Id keyname, Id *didp, Id *iterp);
/* stats */
unsigned int repodata_memused(Repodata *data);
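
A hypothetical caller of the newly exported lookup (my own sketch, not from this commit): it assumes the file list was stored with repodata_add_dirstr() under SOLVABLE_FILELIST and that repodata_internalize() has not run yet, since the function reads the uninternalized attrs/attriddata arrays.

#include <stdio.h>

#include "pool.h"
#include "repo.h"
#include "repodata.h"
#include "knownid.h"

/* print all uninternalized file list entries attached to solvid */
static void dump_filelist(Repodata *data, Id solvid)
{
  Id did, iter = 0;
  const char *str;
  for (;;)
    {
      did = 0;      /* reset before each call so no dir-id filter is applied */
      str = repodata_lookup_dirstrarray_uninternalized(data, solvid,
                                                       SOLVABLE_FILELIST,
                                                       &did, &iter);
      if (!str)
        break;      /* iter has been reset to 0, the walk is finished */
      printf("dir %d: %s\n", (int)did, str);
    }
}

Passing a fixed non-zero dir Id in did instead restricts the walk to entries of that one directory, which is the filtering the didp parameter exists for.
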
diff --git a/src/rules.c b/src/rules.c
index b941986..248b1cd 100644
--- a/src/rules.c
+++ b/src/rules.c
@@ -1157,17 +1157,19 @@ finddistupgradepackages(Solver *solv, Solvable *s, Queue *qs, int allow_all)
if (!qs->count)
{
if (allow_all)
- return 0; /* orphaned, don't create feature rule */
+ return 0; /* orphaned, don't create feature rule */
/* check if this is an orphaned package */
policy_findupdatepackages(solv, s, qs, 1);
if (!qs->count)
- return 0; /* orphaned, don't create update rule */
+ return 0; /* orphaned, don't create update rule */
qs->count = 0;
return -SYSTEMSOLVABLE; /* supported but not installable */
}
if (allow_all)
return s - pool->solvables;
/* check if it is ok to keep the installed package */
+ if (solv->dupmap.size && MAPTST(&solv->dupmap, s - pool->solvables))
+ return s - pool->solvables;
for (i = 0; i < qs->count; i++)
{
Solvable *ns = pool->solvables + qs->elements[i];
@@ -1178,6 +1180,7 @@ finddistupgradepackages(Solver *solv, Solvable *s, Queue *qs, int allow_all)
return -SYSTEMSOLVABLE;
}
+#if 0
/* add packages from the dup repositories to the update candidates
* this isn't needed for the global dup mode as all packages are
* from dup repos in that case */
@@ -1201,6 +1204,7 @@ addduppackages(Solver *solv, Solvable *s, Queue *qs)
}
queue_free(&dupqs);
}
+#endif
/*-------------------------------------------------------------------
*
@@ -1218,18 +1222,15 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all)
Id p, d;
Queue qs;
Id qsbuf[64];
+ int isorphaned = 0;
queue_init_buffer(&qs, qsbuf, sizeof(qsbuf)/sizeof(*qsbuf));
p = s - pool->solvables;
/* find update candidates for 's' */
- if (solv->dupmap_all)
+ if (solv->dupmap_all || (solv->dupinvolvedmap.size && MAPTST(&solv->dupinvolvedmap, p)))
p = finddistupgradepackages(solv, s, &qs, allow_all);
else
- {
- policy_findupdatepackages(solv, s, &qs, allow_all);
- if (!allow_all && solv->dupinvolvedmap.size && MAPTST(&solv->dupinvolvedmap, p))
- addduppackages(solv, s, &qs);
- }
+ policy_findupdatepackages(solv, s, &qs, allow_all);
#ifdef ENABLE_LINKED_PKGS
if (solv->instbuddy && solv->instbuddy[s - pool->solvables - solv->installed->start])
@@ -1237,7 +1238,7 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all)
const char *name = pool_id2str(pool, s->name);
if (strncmp(name, "pattern:", 8) == 0 || strncmp(name, "application:", 12) == 0)
{
- /* a linked pseudo package. As it is linked, we do not need an update rule */
+ /* a linked pseudo package. As it is linked, we do not need an update/feature rule */
/* nevertheless we set specialupdaters so we can update */
solver_addrule(solv, 0, 0, 0);
if (!allow_all && qs.count)
@@ -1254,11 +1255,14 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all)
}
#endif
- if (!allow_all && !p && solv->dupmap_all)
+ if (!allow_all && !p) /* !p implies qs.count == 0 */
{
queue_push(&solv->orphaned, s - pool->solvables); /* an orphaned package */
if (solv->keep_orphans && !(solv->droporphanedmap_all || (solv->droporphanedmap.size && MAPTST(&solv->droporphanedmap, s - pool->solvables - solv->installed->start))))
p = s - pool->solvables; /* keep this orphaned package installed */
+ queue_free(&qs);
+ solver_addrule(solv, p, 0, 0);
+ return;
}
if (!allow_all && qs.count && solv->multiversion.size)
@@ -1271,7 +1275,7 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all)
if (i < qs.count)
{
/* filter out all multiversion packages as they don't update */
- d = pool_queuetowhatprovides(pool, &qs);
+ d = pool_queuetowhatprovides(pool, &qs); /* save qs away */
for (j = i; i < qs.count; i++)
{
if (MAPTST(&solv->multiversion, qs.elements[i]))
@@ -1290,19 +1294,25 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all)
}
qs.elements[j++] = qs.elements[i];
}
- if (j < qs.count)
+ if (j < qs.count) /* filtered at least one package? */
{
- if (d && solv->installed && s->repo == solv->installed &&
- (solv->updatemap_all || (solv->updatemap.size && MAPTST(&solv->updatemap, s - pool->solvables - solv->installed->start))))
+ if (j == 0 && p == -SYSTEMSOLVABLE)
{
+ /* this is a multiversion orphan */
+ queue_push(&solv->orphaned, s - pool->solvables);
if (!solv->specialupdaters)
solv->specialupdaters = solv_calloc(solv->installed->end - solv->installed->start, sizeof(Id));
solv->specialupdaters[s - pool->solvables - solv->installed->start] = d;
- }
- if (j == 0 && p == -SYSTEMSOLVABLE && solv->dupmap_all)
- {
- queue_push(&solv->orphaned, s - pool->solvables); /* also treat as orphaned */
- j = qs.count;
+ if (solv->keep_orphans && !(solv->droporphanedmap_all || (solv->droporphanedmap.size && MAPTST(&solv->droporphanedmap, s - pool->solvables - solv->installed->start))))
+ {
+ /* we need to keep the orphan */
+ queue_free(&qs);
+ solver_addrule(solv, s - pool->solvables, 0, 0);
+ return;
+ }
+ /* we can drop it as long as we update */
+ isorphaned = 1;
+ j = qs.count; /* force the update */
}
qs.count = j;
}
@@ -1310,11 +1320,13 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all)
{
/* could fallthrough, but then we would do pool_queuetowhatprovides twice */
queue_free(&qs);
- solver_addrule(solv, p, 0, d); /* allow update of s */
+ solver_addrule(solv, s - pool->solvables, 0, d); /* allow update of s */
return;
}
}
}
+ if (!isorphaned && p == -SYSTEMSOLVABLE && solv->dupmap.size)
+ p = s - pool->solvables; /* let the dup rules sort it out */
if (qs.count && p == -SYSTEMSOLVABLE)
p = queue_shift(&qs);
if (qs.count > 1)
@@ -1623,7 +1635,7 @@ add_cleandeps_package(Solver *solv, Id p)
queue_pushunique(solv->cleandeps_updatepkgs, p);
}
-static inline void
+static void
solver_addtodupmaps(Solver *solv, Id p, Id how, int targeted)
{
Pool *pool = solv->pool;
@@ -1796,9 +1808,11 @@ void
solver_addduprules(Solver *solv, Map *addedmap)
{
Pool *pool = solv->pool;
+ Repo *installed = solv->installed;
Id p, pp;
Solvable *s, *ps;
int first, i;
+ Rule *r;
solv->duprules = solv->nrules;
for (i = 1; i < pool->nsolvables; i++)
@@ -1818,11 +1832,11 @@ solver_addduprules(Solver *solv, Map *addedmap)
break;
if (!MAPTST(&solv->dupinvolvedmap, p))
continue;
- if (solv->installed && ps->repo == solv->installed)
+ if (installed && ps->repo == installed)
{
if (!solv->updatemap.size)
- map_grow(&solv->updatemap, solv->installed->end - solv->installed->start);
- MAPSET(&solv->updatemap, p - solv->installed->start);
+ map_grow(&solv->updatemap, installed->end - installed->start);
+ MAPSET(&solv->updatemap, p - installed->start);
if (!MAPTST(&solv->dupmap, p))
{
Id ip, ipp;
@@ -1835,10 +1849,22 @@ solver_addduprules(Solver *solv, Map *addedmap)
if (is->evr == ps->evr && solvable_identical(ps, is))
break;
}
- if (!ip)
- solver_addrule(solv, -p, 0, 0); /* no match, sorry */
- else
- MAPSET(&solv->dupmap, p); /* for best rules processing */
+ if (ip)
+ {
+ /* ok, found a good one. we may keep this package. */
+ MAPSET(&solv->dupmap, p); /* for best rules processing */
+ continue;
+ }
+ r = solv->rules + solv->updaterules + (p - installed->start);
+ if (!r->p)
+ r = solv->rules + solv->featurerules + (p - installed->start);
+ if (r->p && solv->specialupdaters && solv->specialupdaters[p - installed->start])
+ {
+ /* this is a multiversion orphan, we're good if an update is installed */
+ solver_addrule(solv, -p, 0, solv->specialupdaters[p - installed->start]);
+ continue;
+ }
+ solver_addrule(solv, -p, 0, 0); /* no match, sorry */
}
}
else if (!MAPTST(&solv->dupmap, p))
@@ -2823,32 +2849,51 @@ solver_rule2rules(Solver *solv, Id rid, Queue *q, int recursive)
/* check if the newest versions of pi still provides the dependency we're looking for */
static int
-solver_choicerulecheck(Solver *solv, Id pi, Rule *r, Map *m)
+solver_choicerulecheck(Solver *solv, Id pi, Rule *r, Map *m, Queue *q)
{
Pool *pool = solv->pool;
Rule *ur;
- Queue q;
- Id p, pp, qbuf[32];
+ Id p, pp;
int i;
- ur = solv->rules + solv->updaterules + (pi - pool->installed->start);
- if (!ur->p)
- ur = solv->rules + solv->featurerules + (pi - pool->installed->start);
- if (!ur->p)
- return 0;
- queue_init_buffer(&q, qbuf, sizeof(qbuf)/sizeof(*qbuf));
- FOR_RULELITERALS(p, pp, ur)
- if (p > 0)
- queue_push(&q, p);
- if (q.count > 1)
- policy_filter_unwanted(solv, &q, POLICY_MODE_CHOOSE);
- for (i = 0; i < q.count; i++)
- if (MAPTST(m, q.elements[i]))
- break;
- /* 1: none of the newest versions provide it */
- i = i == q.count ? 1 : 0;
- queue_free(&q);
- return i;
+ if (!q->count || q->elements[0] != pi)
+ {
+ if (q->count)
+ queue_empty(q);
+ ur = solv->rules + solv->updaterules + (pi - pool->installed->start);
+ if (!ur->p)
+ ur = solv->rules + solv->featurerules + (pi - pool->installed->start);
+ if (!ur->p)
+ return 0;
+ queue_push2(q, pi, 0);
+ FOR_RULELITERALS(p, pp, ur)
+ if (p > 0)
+ queue_push(q, p);
+ }
+ if (q->count == 2)
+ return 1;
+ if (q->count == 3)
+ {
+ p = q->elements[2];
+ return MAPTST(m, p) ? 0 : 1;
+ }
+ if (!q->elements[1])
+ {
+ for (i = 2; i < q->count; i++)
+ if (!MAPTST(m, q->elements[i]))
+ break;
+ if (i == q->count)
+ return 0; /* all provide it, no need to filter */
+ /* some don't provide it, have to filter */
+ queue_deleten(q, 0, 2);
+ policy_filter_unwanted(solv, q, POLICY_MODE_CHOOSE);
+ queue_unshift(q, 1); /* filter mark */
+ queue_unshift(q, pi);
+ }
+ for (i = 2; i < q->count; i++)
+ if (MAPTST(m, q->elements[i]))
+ return 0; /* at least one provides it */
+ return 1; /* none of the new packages provided it */
}
static inline void
@@ -2873,7 +2918,7 @@ solver_addchoicerules(Solver *solv)
Pool *pool = solv->pool;
Map m, mneg;
Rule *r;
- Queue q, qi;
+ Queue q, qi, qcheck;
int i, j, rid, havechoice;
Id p, d, pp;
Id p2, pp2;
@@ -2892,6 +2937,7 @@ solver_addchoicerules(Solver *solv)
solv->choicerules_ref = solv_calloc(solv->pkgrules_end, sizeof(Id));
queue_init(&q);
queue_init(&qi);
+ queue_init(&qcheck);
map_init(&m, pool->nsolvables);
map_init(&mneg, pool->nsolvables);
/* set up negative assertion map from infarch and dup rules */
@@ -3009,7 +3055,7 @@ solver_addchoicerules(Solver *solv)
p2 = qi.elements[i];
if (!p2)
continue;
- if (solver_choicerulecheck(solv, p2, r, &m))
+ if (solver_choicerulecheck(solv, p2, r, &m, &qcheck))
{
/* oops, remove element p from q */
queue_removeelement(&q, qi.elements[i + 1]);
@@ -3018,6 +3064,7 @@ solver_addchoicerules(Solver *solv)
qi.elements[j++] = p2;
}
queue_truncate(&qi, j);
+
if (!q.count || !qi.count)
{
FOR_RULELITERALS(p, pp, r)
@@ -3089,6 +3136,7 @@ solver_addchoicerules(Solver *solv)
}
queue_free(&q);
queue_free(&qi);
+ queue_free(&qcheck);
map_free(&m);
map_free(&mneg);
solv->choicerules_end = solv->nrules;
diff --git a/src/solver.c b/src/solver.c
index c6cad6b..2e28b7d 100644
--- a/src/solver.c
+++ b/src/solver.c
@@ -217,13 +217,24 @@ autouninstall(Solver *solv, Id *problem)
Rule *r;
if (m && !MAPTST(m, v - solv->updaterules))
continue;
- /* check if identical to feature rule, we don't like that */
+ /* check if identical to feature rule, we don't like that (except for orphans) */
r = solv->rules + solv->featurerules + (v - solv->updaterules);
if (!r->p)
{
/* update rule == feature rule */
if (v > lastfeature)
lastfeature = v;
+ /* prefer orphaned packages in dup mode */
+ if (solv->keep_orphans)
+ {
+ r = solv->rules + v;
+ if (!r->d && r->p == (solv->installed->start + (v - solv->updaterules)))
+ {
+ lastfeature = v;
+ lastupdate = 0;
+ break;
+ }
+ }
continue;
}
if (v > lastupdate)
@@ -2714,7 +2725,7 @@ solver_run_sat(Solver *solv, int disablerules, int doweak)
if (!solv->decisioncnt_orphan)
solv->decisioncnt_orphan = solv->decisionq.count;
- if (solv->dupmap_all && solv->installed)
+ if (solv->installed && (solv->orphaned.count || solv->brokenorphanrules))
{
int installedone = 0;
@@ -3350,7 +3361,7 @@ solver_solve(Solver *solv, Queue *job)
Solvable *s;
Rule *r;
int now, solve_start;
- int hasdupjob = 0;
+ int needduprules = 0;
int hasbestinstalljob = 0;
solve_start = solv_timems(0);
@@ -3561,6 +3572,19 @@ solver_solve(Solver *solv, Queue *job)
MAPSET(&solv->droporphanedmap, p - installed->start);
}
break;
+ case SOLVER_ALLOWUNINSTALL:
+ if (select == SOLVER_SOLVABLE_ALL || (select == SOLVER_SOLVABLE_REPO && installed && what == installed->repoid))
+ solv->allowuninstall_all = 1;
+ FOR_JOB_SELECT(p, pp, select, what)
+ {
+ s = pool->solvables + p;
+ if (s->repo != installed)
+ continue;
+ if (!solv->allowuninstallmap.size)
+ map_grow(&solv->allowuninstallmap, installed->end - installed->start);
+ MAPSET(&solv->allowuninstallmap, p - installed->start);
+ }
+ break;
default:
break;
}
@@ -3608,8 +3632,10 @@ solver_solve(Solver *solv, Queue *job)
if (how & SOLVER_FORCEBEST)
solv->bestupdatemap_all = 1;
}
- if (!solv->dupmap_all || solv->allowuninstall)
- hasdupjob = 1;
+ if ((how & SOLVER_TARGETED) != 0)
+ needduprules = 1;
+ if (!solv->dupmap_all || solv->allowuninstall || solv->allowuninstall_all || solv->allowuninstallmap.size || solv->keep_orphans)
+ needduprules = 1;
break;
default:
break;
@@ -3664,7 +3690,7 @@ solver_solve(Solver *solv, Queue *job)
/* create dup maps if needed. We need the maps early to create our
* update rules */
- if (hasdupjob)
+ if (needduprules)
solver_createdupmaps(solv);
/*
@@ -3723,9 +3749,13 @@ solver_solve(Solver *solv, Queue *job)
* check for and remove duplicate
*/
r = solv->rules + solv->nrules - 1; /* r: update rule */
- if (!r->p)
- continue;
sr = r - (installed->end - installed->start); /* sr: feature rule */
+ if (!r->p)
+ {
+ if (sr->p)
+ memset(sr, 0, sizeof(*sr)); /* no feature rules without update rules */
+ continue;
+ }
/* it's also orphaned if the feature rule consists just of the installed package */
if (!solv->dupmap_all && sr->p == i && !sr->d && !sr->w2)
queue_push(&solv->orphaned, i);
@@ -3917,17 +3947,6 @@ solver_solve(Solver *solv, Queue *job)
break;
case SOLVER_ALLOWUNINSTALL:
POOL_DEBUG(SOLV_DEBUG_JOB, "job: allowuninstall %s\n", solver_select2str(pool, select, what));
- if (select == SOLVER_SOLVABLE_ALL || (select == SOLVER_SOLVABLE_REPO && installed && what == installed->repoid))
- solv->allowuninstall_all = 1;
- FOR_JOB_SELECT(p, pp, select, what)
- {
- s = pool->solvables + p;
- if (s->repo != installed)
- continue;
- if (!solv->allowuninstallmap.size)
- map_grow(&solv->allowuninstallmap, installed->end - installed->start);
- MAPSET(&solv->allowuninstallmap, p - installed->start);
- }
break;
default:
POOL_DEBUG(SOLV_DEBUG_JOB, "job: unknown job\n");
@@ -3966,7 +3985,7 @@ solver_solve(Solver *solv, Queue *job)
else
solv->infarchrules = solv->infarchrules_end = solv->nrules;
- if (hasdupjob)
+ if (needduprules)
solver_addduprules(solv, &addedmap);
else
solv->duprules = solv->duprules_end = solv->nrules;
@@ -3976,7 +3995,7 @@ solver_solve(Solver *solv, Queue *job)
else
solv->bestrules = solv->bestrules_end = solv->nrules;
- if (hasdupjob)
+ if (needduprules)
solver_freedupmaps(solv); /* no longer needed */
if (solv->do_yum_obsoletes)
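
The SOLVER_ALLOWUNINSTALL handling now runs in the early job pass so the allowuninstall maps exist before the update rules are built. A hypothetical caller using the standard job interface (sketch only; in practice the job would accompany an update or distupgrade request):

#include "pool.h"
#include "repo.h"
#include "queue.h"
#include "solver.h"

/* run a solver with uninstalls of installed packages allowed */
static int solve_allowing_uninstall(Pool *pool, Repo *installed)
{
  Solver *solv = solver_create(pool);
  Queue job;
  int problems;

  queue_init(&job);
  /* same select/what combination the early pass above checks for */
  queue_push2(&job, SOLVER_ALLOWUNINSTALL | SOLVER_SOLVABLE_REPO,
              installed->repoid);
  problems = solver_solve(solv, &job);
  queue_free(&job);
  solver_free(solv);
  return problems;
}
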
diff --git a/src/util.c b/src/util.c
index d8ae7ca..d611297 100644
--- a/src/util.c
+++ b/src/util.c
@@ -76,6 +76,30 @@ solv_calloc(size_t num, size_t len)
return r;
}
+/* this was solv_realloc2(old, len, size), but we now overshoot
+ * for huge len sizes */
+void *
+solv_extend_realloc(void *old, size_t len, size_t size, size_t block)
+{
+ size_t xblock = (block + 1) << 5;
+ len = (len + block) & ~block;
+ if (len >= xblock && xblock)
+ {
+ xblock <<= 1;
+ while (len >= xblock && xblock)
+ xblock <<= 1;
+ if (xblock)
+ {
+ size_t nlen;
+ xblock = (xblock >> 5) - 1;
+ nlen = (len + xblock) & ~xblock;
+ if (nlen > len)
+ len = nlen;
+ }
+ }
+ return solv_realloc2(old, len, size);
+}
+
void *
solv_free(void *mem)
{
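
To make the overshoot visible, a standalone re-derivation of the size rounding done by solv_extend_realloc() (my own helper and sample numbers, not the library code):

/* standalone sketch: reproduce the rounding of solv_extend_realloc().
 * block is a mask of the form 2^n - 1, here 15 (chunks of 16 elements). */
#include <stdio.h>

static size_t rounded_len(size_t len, size_t block)
{
  size_t xblock = (block + 1) << 5;
  len = (len + block) & ~block;            /* plain block rounding */
  if (len >= xblock && xblock)
    {
      xblock <<= 1;
      while (len >= xblock && xblock)      /* next power of two above len */
        xblock <<= 1;
      if (xblock)
        {
          size_t nlen, mask = (xblock >> 5) - 1;   /* 1/32 of that power */
          nlen = (len + mask) & ~mask;
          if (nlen > len)
            len = nlen;
        }
    }
  return len;
}

int main(void)
{
  printf("%zu %zu %zu\n",
         rounded_len(20, 15),       /* 32: small, plain block rounding */
         rounded_len(1000, 15),     /* 1024: chunk is 1024/32 = 32 */
         rounded_len(100000, 15));  /* 102400: chunk is 131072/32 = 4096 */
  return 0;
}

Small buffers keep the old fixed block rounding; once the rounded length reaches 32 blocks, the allocation is rounded up to a multiple of 1/32 of the next power of two, so the growth step scales with the buffer and the number of reallocations stays roughly logarithmic in the final size.
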
diff --git a/src/util.h b/src/util.h
index d8a136d..5f7a93a 100644
--- a/src/util.h
+++ b/src/util.h
@@ -29,6 +29,7 @@ extern void *solv_malloc2(size_t, size_t);
extern void *solv_calloc(size_t, size_t);
extern void *solv_realloc(void *, size_t);
extern void *solv_realloc2(void *, size_t, size_t);
+extern void *solv_extend_realloc(void *, size_t, size_t, size_t);
extern void *solv_free(void *);
extern char *solv_strdup(const char *);
extern void solv_oom(size_t, size_t);
@@ -48,12 +49,12 @@ static inline void *solv_extend(void *buf, size_t len, size_t nmemb, size_t size
if (nmemb == 1)
{
if ((len & block) == 0)
- buf = solv_realloc2(buf, len + (1 + block), size);
+ buf = solv_extend_realloc(buf, len + 1, size, block);
}
else
{
if (((len - 1) | block) != ((len + nmemb - 1) | block))
- buf = solv_realloc2(buf, (len + (nmemb + block)) & ~block, size);
+ buf = solv_extend_realloc(buf, len + nmemb, size, block);
}
return buf;
}
@@ -76,7 +77,7 @@ static inline void *solv_zextend(void *buf, size_t len, size_t nmemb, size_t siz
static inline void *solv_extend_resize(void *buf, size_t len, size_t size, size_t block)
{
if (len)
- buf = solv_realloc2(buf, (len + block) & ~block, size);
+ buf = solv_extend_realloc(buf, len, size, block);
return buf;
}
@@ -85,7 +86,7 @@ static inline void *solv_calloc_block(size_t len, size_t size, size_t block)
void *buf;
if (!len)
return 0;
- buf = solv_malloc2((len + block) & ~block, size);
+ buf = solv_extend_realloc((void *)0, len, size, block);
memset(buf, 0, ((len + block) & ~block) * size);
return buf;
}
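
For reference, the call pattern these inline helpers serve, as a hypothetical helper of my own (push_id and its parameters are illustrative; block must be of the form 2^n - 1):

#include "pooltypes.h"
#include "util.h"

static Id *push_id(Id *buf, int *lenp, Id x)
{
  /* reallocates only when a 16-element block boundary is crossed */
  buf = solv_extend(buf, *lenp, 1, sizeof(Id), 15);
  buf[(*lenp)++] = x;
  return buf;
}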