diff options
34 files changed, 1048 insertions, 292 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index c089e21..fd1426b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,6 +123,7 @@ MESSAGE (STATUS "Building for Mageia") ADD_DEFINITIONS (-DMAGEIA) SET (ENABLE_MDKREPO ON) SET (ENABLE_RPMDB ON) +SET (ENABLE_RPMMD ON) SET (ENABLE_LZMA_COMPRESSION ON) SET (have_system ${have_system}x) ENDIF (MAGEIA) @@ -10,7 +10,7 @@ This code is based on two major, but independent, blocks: resolving package dependencies. The sat-solver code has been written to aim for the newest packages, -record the decison tree to provide introspection, and also allows to +record the decision tree to provide introspection, and also allows to provide the user with suggestions on how to deal with unsolvable problems. It also takes advantage of the repository storage to minimize memory usage. diff --git a/VERSION.cmake b/VERSION.cmake index 9ae12b5..7ba4cb6 100644 --- a/VERSION.cmake +++ b/VERSION.cmake @@ -49,5 +49,5 @@ SET(LIBSOLVEXT_SOVERSION "0") SET(LIBSOLV_MAJOR "0") SET(LIBSOLV_MINOR "6") -SET(LIBSOLV_PATCH "14") +SET(LIBSOLV_PATCH "15") diff --git a/doc/helix2solv.1 b/doc/helix2solv.1 index 900482f..07d9497 100644 --- a/doc/helix2solv.1 +++ b/doc/helix2solv.1 @@ -2,12 +2,12 @@ .\" Title: helix2solv .\" Author: [see the "Author" section] .\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/> -.\" Date: 08/26/2015 +.\" Date: 12/14/2015 .\" Manual: LIBSOLV .\" Source: libsolv .\" Language: English .\" -.TH "HELIX2SOLV" "1" "08/26/2015" "libsolv" "LIBSOLV" +.TH "HELIX2SOLV" "1" "12/14/2015" "libsolv" "LIBSOLV" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -34,7 +34,7 @@ helix2solv \- convert legacy helixcode format into a solv file \fBhelix2solv\fR .SH "DESCRIPTION" .sp -The helix format was a metadata format used in the RedCarpet package manager\&. It\(cqs still used in libzypp testcases\&. 
The helix2solv tool reads data in helix format from standhard input and write it in solv file format to standard output\&. +The helix format was a metadata format used in the RedCarpet package manager\&. It\(cqs still used in libzypp testcases\&. The helix2solv tool reads data in helix format from standard input and writes it in solv file format to standard output\&. .SH "AUTHOR" .sp Michael Schroeder <mls@suse\&.de> diff --git a/doc/helix2solv.txt b/doc/helix2solv.txt index db8dfe9..f9b303f 100644 --- a/doc/helix2solv.txt +++ b/doc/helix2solv.txt @@ -16,8 +16,8 @@ Description ----------- The helix format was a metadata format used in the RedCarpet package manager. It's still used in libzypp testcases. -The helix2solv tool reads data in helix format from standhard -input and write it in solv file format to standard output. +The helix2solv tool reads data in helix format from standard +input and writes it in solv file format to standard output. Author ------ diff --git a/doc/libsolv-bindings.3 b/doc/libsolv-bindings.3 index ad8a7e6..14bf96c 100644 --- a/doc/libsolv-bindings.3 +++ b/doc/libsolv-bindings.3 @@ -2,12 +2,12 @@ .\" Title: Libsolv-Bindings .\" Author: [see the "Author" section] .\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/> -.\" Date: 09/21/2015 +.\" Date: 12/14/2015 .\" Manual: LIBSOLV .\" Source: libsolv .\" Language: English .\" -.TH "LIBSOLV\-BINDINGS" "3" "09/21/2015" "libsolv" "LIBSOLV" +.TH "LIBSOLV\-BINDINGS" "3" "12/14/2015" "libsolv" "LIBSOLV" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -424,7 +424,7 @@ the \(lqequals to\(rq bit .PP \fBREL_GT\fR .RS 4 -the \(lqgreater then\(rq bit +the \(lqgreater than\(rq bit .RE .PP \fBREL_ARCH\fR @@ -609,7 +609,7 @@ If a package is installed in multiversionmode, rpm used to ignore both the impli .PP \fBPOOL_FLAG_ADDFILEPROVIDESFILTERED\fR .RS 4 -Make 
the addfileprovides method only add files from the standard locations (i\&.e\&. the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only few packages that use non\-standard file dependencies, but you still wand the fast speed that addfileprovides() generates\&. +Make the addfileprovides method only add files from the standard locations (i\&.e\&. the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only few packages that use non\-standard file dependencies, but you still want the fast speed that addfileprovides() generates\&. .RE .SS "METHODS" .sp @@ -641,7 +641,7 @@ Force a free of the pool\&. After this call, you must not access any object that .RE .\} .sp -Break the ownership relation betwen the binding object and the pool\&. After this call, the pool will not get freed even if the object goes out of scope\&. This also means that you must manually call the free method to free the pool data\&. +Break the ownership relation between the binding object and the pool\&. After this call, the pool will not get freed even if the object goes out of scope\&. This also means that you must manually call the free method to free the pool data\&. .sp .if n \{\ .RS 4 @@ -815,7 +815,7 @@ my \fI@ids\fR \fB=\fR \fI$pool\fR\fB\->addfileprovides_queue()\fR; .RE .\} .sp -Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependency for file names and than scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&. The addfileprovides_queue variant works the same way but returns an array containing all file dependencies\&. 
This information can be stored in the meta section of the repositories to speed up the next time the repository is loaded and addfileprovides is called\&. +Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependency for file names and then scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&. The addfileprovides_queue variant works the same way but returns an array containing all file dependencies\&. This information can be stored in the meta section of the repositories to speed up the next time the repository is loaded and addfileprovides is called\&. .sp .if n \{\ .RS 4 @@ -845,7 +845,7 @@ my \fI@solvables\fR \fB=\fR \fI$pool\fR\fB\->whatprovides(\fR\fI$dep\fR\fB)\fR; .RE .\} .sp -Return all solvables that provide the specified dependency\&. You can use either a Dep object or an simple Id as argument\&. +Return all solvables that provide the specified dependency\&. You can use either a Dep object or a simple Id as argument\&. .sp .if n \{\ .RS 4 @@ -1008,7 +1008,7 @@ Get/Set fixed jobs stored in the pool\&. Those jobs are automatically appended t .RE .\} .sp -Set the callback function called when repository metadata needs to be loaded on demand\&. To make use of this feature, you need to create repodata stubs that tell the library which data is available but not loaded\&. If later on the data needs to be accessed, the callback function is called with a repodata argument\&. You can then load the data (maybe fetching it first from an remote server)\&. The callback should return true if the data has been made available\&. +Set the callback function called when repository metadata needs to be loaded on demand\&. 
To make use of this feature, you need to create repodata stubs that tell the library which data is available but not loaded\&. If later on the data needs to be accessed, the callback function is called with a repodata argument\&. You can then load the data (maybe fetching it first from a remote server)\&. The callback should return true if the data has been made available\&. .sp .if n \{\ .RS 4 @@ -1589,7 +1589,7 @@ Do not create stubs for repository parts that can be downloaded on demand\&. .PP \fBSUSETAGS_RECORD_SHARES\fR .RS 4 -This is specific to the add_susetags() method\&. Susetags allows to refer to already read packages to save disk space\&. If this data sharing needs to work over multiple calls to add_susetags, you need to specify this flag so that the share information is made available to subsequent calls\&. +This is specific to the add_susetags() method\&. Susetags allows one to refer to already read packages to save disk space\&. If this data sharing needs to work over multiple calls to add_susetags, you need to specify this flag so that the share information is made available to subsequent calls\&. .RE .SS "METHODS" .sp @@ -1973,7 +1973,7 @@ Add metadata stored in the "rpm\-md" format (i\&.e\&. from files in the \(lqrepo .RE .\} .sp -Add the repomd\&.xml meta description from the "rpm\-md" format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added the the "meta" section of the repository, i\&.e\&. no package gets created\&. +Add the repomd\&.xml meta description from the "rpm\-md" format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added to the "meta" section of the repository, i\&.e\&. no package gets created\&. 
.sp .if n \{\ .RS 4 @@ -2123,7 +2123,7 @@ Add the contents of the archlinux installed package database to the repository\& .RE .\} .sp -Add the \(lqcontent\(rq meta description from the susetags format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added the the "meta" section of the repository, i\&.e\&. no package gets created\&. +Add the \(lqcontent\(rq meta description from the susetags format to the repository\&. This file contains information about the repository like keywords, and also a list of all database files with checksums\&. The data is added to the "meta" section of the repository, i\&.e\&. no package gets created\&. .sp .if n \{\ .RS 4 @@ -2536,7 +2536,7 @@ Return true if the two solvables are identical\&. .RE .\} .sp -Returns \-1 if the epoch/version/release of the solvable is less then the one from the other solvable, 1 if it is greater, and 0 if they are equal\&. Note that "equal" does not mean that the evr is identical\&. +Returns \-1 if the epoch/version/release of the solvable is less than the one from the other solvable, 1 if it is greater, and 0 if they are equal\&. Note that "equal" does not mean that the evr is identical\&. .sp .if n \{\ .RS 4 @@ -3583,7 +3583,7 @@ A rule to implement the update policy of installed packages\&. Every installed p .PP \fBSOLVER_RULE_FEATURE\fR .RS 4 -Feature rules are fallback rules used when a update rule is disabled\&. They include all packages that may replace the installed package ignoring the update policy, i\&.e\&. they contain downgrades, arch changes and so on\&. Without them, the solver would simply erase installed packages if their update rule gets disabled\&. +Feature rules are fallback rules used when an update rule is disabled\&. They include all packages that may replace the installed package ignoring the update policy, i\&.e\&. they contain downgrades, arch changes and so on\&. 
Without them, the solver would simply erase installed packages if their update rule gets disabled\&. .RE .PP \fBSOLVER_RULE_JOB\fR @@ -3593,7 +3593,7 @@ Job rules implement the job given to the solver\&. .PP \fBSOLVER_RULE_DISTUPGRADE\fR .RS 4 -This are simple negative assertions that make sure that only packages are kept that are also available in one of the repositories\&. +These are simple negative assertions that make sure that only packages are kept that are also available in one of the repositories\&. .RE .PP \fBSOLVER_RULE_INFARCH\fR @@ -3608,7 +3608,7 @@ Choice rules are used to make sure that the solver prefers updating to installin .PP \fBSOLVER_RULE_LEARNT\fR .RS 4 -These rules are generated by the solver to keep it from running into the same problem multiple times when it has to backtrack\&. They are the main reason why a sat solver is faster then other dependency solver implementations\&. +These rules are generated by the solver to keep it from running into the same problem multiple times when it has to backtrack\&. They are the main reason why a sat solver is faster than other dependency solver implementations\&. .RE .sp Special dependency rule types: @@ -4511,7 +4511,7 @@ This element installs a package with a different version keeping the other versi .PP \fBSOLVER_TRANSACTION_MULTIREINSTALL\fR .RS 4 -This element reinstalls a installed package keeping the other versions installed\&. +This element reinstalls an installed package keeping the other versions installed\&. .RE .sp Transaction element types, active view @@ -4533,7 +4533,7 @@ This element installs a newer version of an installed package\&. .PP \fBSOLVER_TRANSACTION_DOWNGRADE\fR .RS 4 -This element installs a older version of an installed package\&. +This element installs an older version of an installed package\&. 
.RE .PP \fBSOLVER_TRANSACTION_OBSOLETES\fR @@ -4679,7 +4679,7 @@ my \fI@newsolvables\fR \fB=\fR \fI$trans\fR\fB\->newsolvables()\fR; .RE .\} .sp -Return all packages that are to be installed by the transaction\&. This are the packages that need to be downloaded from the repositories\&. +Return all packages that are to be installed by the transaction\&. These are the packages that need to be downloaded from the repositories\&. .sp .if n \{\ .RS 4 @@ -4803,10 +4803,10 @@ Return the size change of the installed system in kilobytes (kibibytes)\&. .RE .\} .sp -Order the steps in the transactions so that dependant packages are updated before packages that depend on them\&. For rpm, you can also use rpmlib\(cqs ordering functionality, debian\(cqs dpkg does not provide a way to order a transaction\&. +Order the steps in the transactions so that dependent packages are updated before packages that depend on them\&. For rpm, you can also use rpmlib\(cqs ordering functionality, debian\(cqs dpkg does not provide a way to order a transaction\&. .SS "ACTIVE/PASSIVE VIEW" .sp -Active view list what new packages get installed, while passive view shows what happens to the installed packages\&. Most often there\(cqs not much difference between the two modes, but things get interesting of multiple package get replaced by one new package\&. Say you have installed package A\-1\-1 and B\-1\-1, and now install A\-2\-1 with has a new dependency that obsoletes B\&. The transaction elements will be +Active view lists what new packages get installed, while passive view shows what happens to the installed packages\&. Most often there\(cqs not much difference between the two modes, but things get interesting if multiple packages get replaced by one new package\&. Say you have installed packages A\-1\-1 and B\-1\-1, and now install A\-2\-1 which has a new dependency that obsoletes B\&. 
The transaction elements will be .sp .if n \{\ .RS 4 diff --git a/doc/libsolv-bindings.txt b/doc/libsolv-bindings.txt index 13d73bd..1ee699d 100644 --- a/doc/libsolv-bindings.txt +++ b/doc/libsolv-bindings.txt @@ -203,7 +203,7 @@ the ``less than'' bit the ``equals to'' bit *REL_GT*:: -the ``greater then'' bit +the ``greater than'' bit *REL_ARCH*:: used for relations that describe an extra architecture filter, the @@ -349,7 +349,7 @@ obsoleted packages still get removed. Make the addfileprovides method only add files from the standard locations (i.e. the ``bin'' and ``etc'' directories). This is useful if you have only few packages that use non-standard file -dependencies, but you still wand the fast speed that addfileprovides() +dependencies, but you still want the fast speed that addfileprovides() generates. === METHODS === @@ -367,7 +367,7 @@ that still references the pool. pool.disown() pool.disown() -Break the ownership relation betwen the binding object and the pool. After +Break the ownership relation between the binding object and the pool. After this call, the pool will not get freed even if the object goes out of scope. This also means that you must manually call the free method to free the pool data. @@ -461,7 +461,7 @@ not in the pool and _create_ is false, *undef*/*None*/*nil* is returned. Some package managers like rpm allow dependencies on files contained in other packages. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all -repositories. This method will scan all dependency for file names and than scan +repositories. This method will scan all dependency for file names and then scan all packages for matching files. If a filename has been matched, it will be added to the provides list of the corresponding package. The addfileprovides_queue variant works the same way but returns an array @@ -485,7 +485,7 @@ the call to addfileprovides(). 
solvables = pool.whatprovides(dep) Return all solvables that provide the specified dependency. You can use either -a Dep object or an simple Id as argument. +a Dep object or a simple Id as argument. Id *matchprovidingids(const char *match, int flags) my @ids = $pool->matchprovidingids($match, $flags); @@ -575,7 +575,7 @@ Set the callback function called when repository metadata needs to be loaded on demand. To make use of this feature, you need to create repodata stubs that tell the library which data is available but not loaded. If later on the data needs to be accessed, the callback function is called with a repodata argument. -You can then load the data (maybe fetching it first from an remote server). +You can then load the data (maybe fetching it first from a remote server). The callback should return true if the data has been made available. /* bindings only */ @@ -909,7 +909,7 @@ in your code. Do not create stubs for repository parts that can be downloaded on demand. *SUSETAGS_RECORD_SHARES*:: -This is specific to the add_susetags() method. Susetags allows to refer to +This is specific to the add_susetags() method. Susetags allows one to refer to already read packages to save disk space. If this data sharing needs to work over multiple calls to add_susetags, you need to specify this flag so that the share information is made available to subsequent calls. @@ -1121,7 +1121,7 @@ parameter. Add the repomd.xml meta description from the "rpm-md" format to the repository. This file contains information about the repository like keywords, and also a -list of all database files with checksums. The data is added the the "meta" +list of all database files with checksums. The data is added to the "meta" section of the repository, i.e. no package gets created. bool add_updateinfoxml(FILE *fp, int flags = 0) @@ -1203,7 +1203,7 @@ The _dir_ parameter is usually set to "/var/lib/pacman/local". Add the ``content'' meta description from the susetags format to the repository. 
This file contains information about the repository like keywords, and also -a list of all database files with checksums. The data is added the the "meta" +a list of all database files with checksums. The data is added to the "meta" section of the repository, i.e. no package gets created. bool add_susetags(FILE *fp, Id defvendor, const char *language, int flags = 0) @@ -1410,7 +1410,7 @@ Return true if the two solvables are identical. $solvable.evrcmp(other) $solvable.evrcmp(other) -Returns -1 if the epoch/version/release of the solvable is less then the +Returns -1 if the epoch/version/release of the solvable is less than the one from the other solvable, 1 if it is greater, and 0 if they are equal. Note that "equal" does not mean that the evr is identical. @@ -2171,7 +2171,7 @@ installed package has an update rule that consists of the packages that may replace the installed package. *SOLVER_RULE_FEATURE*:: -Feature rules are fallback rules used when a update rule is disabled. They +Feature rules are fallback rules used when an update rule is disabled. They include all packages that may replace the installed package ignoring the update policy, i.e. they contain downgrades, arch changes and so on. Without them, the solver would simply erase installed packages if their @@ -2181,7 +2181,7 @@ update rule gets disabled. Job rules implement the job given to the solver. *SOLVER_RULE_DISTUPGRADE*:: -This are simple negative assertions that make sure that only packages +These are simple negative assertions that make sure that only packages are kept that are also available in one of the repositories. *SOLVER_RULE_INFARCH*:: @@ -2198,7 +2198,7 @@ choice rules, so you will not see them when a problem is found. *SOLVER_RULE_LEARNT*:: These rules are generated by the solver to keep it from running into the same problem multiple times when it has to backtrack. 
They are -the main reason why a sat solver is faster then other dependency solver +the main reason why a sat solver is faster than other dependency solver implementations. Special dependency rule types: @@ -2779,7 +2779,7 @@ This element installs a package with a different version keeping the other versions installed. *SOLVER_TRANSACTION_MULTIREINSTALL*:: -This element reinstalls a installed package keeping the other versions +This element reinstalls an installed package keeping the other versions installed. Transaction element types, active view @@ -2795,7 +2795,7 @@ different content. This element installs a newer version of an installed package. *SOLVER_TRANSACTION_DOWNGRADE*:: -This element installs a older version of an installed package. +This element installs an older version of an installed package. *SOLVER_TRANSACTION_OBSOLETES*:: This element installs a package that obsoletes an installed package. @@ -2897,7 +2897,7 @@ Returns true if the transaction does not do anything, i.e. has no elements. newsolvables = trans.newsolvables() newsolvables = trans.newsolvables() -Return all packages that are to be installed by the transaction. This are +Return all packages that are to be installed by the transaction. These are the packages that need to be downloaded from the repositories. Solvable *keptsolvables(); @@ -2975,18 +2975,18 @@ Return the size change of the installed system in kilobytes (kibibytes). trans.order() trans.order() -Order the steps in the transactions so that dependant packages are updated +Order the steps in the transactions so that dependent packages are updated before packages that depend on them. For rpm, you can also use rpmlib's ordering functionality, debian's dpkg does not provide a way to order a transaction. === ACTIVE/PASSIVE VIEW === -Active view list what new packages get installed, while passive view shows +Active view lists what new packages get installed, while passive view shows what happens to the installed packages. 
Most often there's not much -difference between the two modes, but things get interesting of multiple -package get replaced by one new package. Say you have installed package -A-1-1 and B-1-1, and now install A-2-1 with has a new dependency that +difference between the two modes, but things get interesting if multiple +packages get replaced by one new package. Say you have installed packages +A-1-1 and B-1-1, and now install A-2-1 which has a new dependency that obsoletes B. The transaction elements will be updated A-1-1 (other: A-2-1) diff --git a/doc/libsolv-constantids.3 b/doc/libsolv-constantids.3 index bf2230a..327150c 100644 --- a/doc/libsolv-constantids.3 +++ b/doc/libsolv-constantids.3 @@ -2,12 +2,12 @@ .\" Title: Libsolv-Constantids .\" Author: [see the "Author" section] .\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/> -.\" Date: 08/26/2015 +.\" Date: 12/14/2015 .\" Manual: LIBSOLV .\" Source: libsolv .\" Language: English .\" -.TH "LIBSOLV\-CONSTANTIDS" "3" "08/26/2015" "libsolv" "LIBSOLV" +.TH "LIBSOLV\-CONSTANTIDS" "3" "12/14/2015" "libsolv" "LIBSOLV" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -89,7 +89,7 @@ Stores an array of dependency Ids that describe the capabilities that also must .PP \fBSOLVABLE_RECOMMENDS "solvable:recommends"\fR .RS 4 -Stores an array of dependency Ids that describe the capabilities that also should be installed when this package is installed\&. It\(cqs not an error if not all capabilites can be met\&. +Stores an array of dependency Ids that describe the capabilities that also should be installed when this package is installed\&. It\(cqs not an error if not all capabilities can be met\&. .RE .PP \fBSOLVABLE_SUGGESTS "solvable:suggests"\fR @@ -390,7 +390,7 @@ The basename of the product file in the package\&. 
.PP \fBPRODUCT_SHORTLABEL "product:shortlabel"\fR .RS 4 -A identification string of the product\&. +An identification string of the product\&. .RE .PP \fBPRODUCT_DISTPRODUCT "product:distproduct"\fR @@ -430,12 +430,12 @@ A product line string used for product registering\&. .PP \fBPRODUCT_REGISTER_TARGET "product:regtarget"\fR .RS 4 -A target for prouduct registering\&. +A target for product registering\&. .RE .PP \fBPRODUCT_REGISTER_RELEASE "product:regrelease"\fR .RS 4 -A release string for proudct registering\&. +A release string for product registering\&. .RE .PP \fBPUBKEY_KEYID "pubkey:keyid"\fR @@ -548,7 +548,7 @@ The dependency is a special modalias dependency that matches installed hardware\ .PP \fBNAMESPACE_SPLITPROVIDES "namespace:splitprovides"\fR .RS 4 -The dependency is a special splitprovides dependency used to implement updates that include a package split\&. A splitprovoide dependency contains a filename and a package name, it is matched if a package with the provided package name is installed that contains the filename\&. This namespace is implemented in libsolv, so you do not need a callback\&. +The dependency is a special splitprovides dependency used to implement updates that include a package split\&. A splitprovides dependency contains a filename and a package name, it is matched if a package with the provided package name is installed that contains the filename\&. This namespace is implemented in libsolv, so you do not need a callback\&. .RE .PP \fBNAMESPACE_LANGUAGE "namespace:language"\fR @@ -636,12 +636,12 @@ The data is an array of non\-zero Ids ordered so that it needs less space\&. .PP \fBREPOKEY_TYPE_DIRSTRARRAY "repokey:type:dirstrarray"\fR .RS 4 -The data is an tuple consisting of a directory Id and a basename\&. Used to store file names\&. +The data is a tuple consisting of a directory Id and a basename\&. Used to store file names\&. 
.RE .PP \fBREPOKEY_TYPE_DIRNUMNUMARRAY "repokey:type:dirnumnumarray"\fR .RS 4 -The data is an triple consisting of a directory Id and two 32bit unsigned integers\&. Used to store disk usage information\&. +The data is a triple consisting of a directory Id and two 32bit unsigned integers\&. Used to store disk usage information\&. .RE .PP \fBREPOKEY_TYPE_MD5 "repokey:type:md5"\fR @@ -884,7 +884,7 @@ The checksum of the delta rpm file\&. .PP \fBDELTA_BASE_EVR "delta:baseevr"\fR .RS 4 -The version of the package the delta was build against\&. +The version of the package the delta was built against\&. .RE .PP \fBDELTA_SEQ_NAME "delta:seqname"\fR diff --git a/doc/libsolv-constantids.txt b/doc/libsolv-constantids.txt index 7a83b46..e067642 100644 --- a/doc/libsolv-constantids.txt +++ b/doc/libsolv-constantids.txt @@ -71,7 +71,7 @@ of the values stored in the attribute with the keyname. *SOLVABLE_RECOMMENDS "solvable:recommends"*:: Stores an array of dependency Ids that describe the capabilities that also should be installed when this package is installed. It's not an - error if not all capabilites can be met. + error if not all capabilities can be met. *SOLVABLE_SUGGESTS "solvable:suggests"*:: Stores an array of dependency Ids that describe the capabilities that @@ -292,7 +292,7 @@ Special Solvable Attributes The basename of the product file in the package. *PRODUCT_SHORTLABEL "product:shortlabel"*:: - A identification string of the product. + An identification string of the product. *PRODUCT_DISTPRODUCT "product:distproduct"*:: Obsolete, do not use. Was a SUSE Code-10 product name. @@ -316,10 +316,10 @@ Special Solvable Attributes A product line string used for product registering. *PRODUCT_REGISTER_TARGET "product:regtarget"*:: - A target for prouduct registering. + A target for product registering. *PRODUCT_REGISTER_RELEASE "product:regrelease"*:: - A release string for proudct registering. + A release string for product registering. 
*PUBKEY_KEYID "pubkey:keyid"*:: The keyid of a pubkey, consisting of 8 bytes in hex. @@ -406,7 +406,7 @@ different semantics. *NAMESPACE_SPLITPROVIDES "namespace:splitprovides"*:: The dependency is a special splitprovides dependency used to implement - updates that include a package split. A splitprovoide dependency contains + updates that include a package split. A splitprovides dependency contains a filename and a package name, it is matched if a package with the provided package name is installed that contains the filename. This namespace is implemented in libsolv, so you do not need a callback. @@ -479,11 +479,11 @@ know how to interpret the data. The following types are available: space. *REPOKEY_TYPE_DIRSTRARRAY "repokey:type:dirstrarray"*:: - The data is an tuple consisting of a directory Id and a basename. + The data is a tuple consisting of a directory Id and a basename. Used to store file names. *REPOKEY_TYPE_DIRNUMNUMARRAY "repokey:type:dirnumnumarray"*:: - The data is an triple consisting of a directory Id and two 32bit + The data is a triple consisting of a directory Id and two 32bit unsigned integers. Used to store disk usage information. *REPOKEY_TYPE_MD5 "repokey:type:md5"*:: @@ -661,7 +661,7 @@ Delta Package Attributes The checksum of the delta rpm file. *DELTA_BASE_EVR "delta:baseevr"*:: - The version of the package the delta was build against. + The version of the package the delta was built against. *DELTA_SEQ_NAME "delta:seqname"*:: The first part of the delta sequence, the base package name. 
diff --git a/doc/libsolv-pool.3 b/doc/libsolv-pool.3 index 79d7f41..0929ba6 100644 --- a/doc/libsolv-pool.3 +++ b/doc/libsolv-pool.3 @@ -2,12 +2,12 @@ .\" Title: Libsolv-Pool .\" Author: [see the "Author" section] .\" Generator: DocBook XSL Stylesheets v1.78.0 <http://docbook.sf.net/> -.\" Date: 08/26/2015 +.\" Date: 12/14/2015 .\" Manual: LIBSOLV .\" Source: libsolv .\" Language: English .\" -.TH "LIBSOLV\-POOL" "3" "08/26/2015" "libsolv" "LIBSOLV" +.TH "LIBSOLV\-POOL" "3" "12/14/2015" "libsolv" "LIBSOLV" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -290,22 +290,22 @@ Set a custom debug callback function\&. Instead of writing to stdout or stderr, .PP \fBDISTTYPE_RPM\fR .RS 4 -Used for systems with use rpm as low level package manager\&. +Used for systems which use rpm as low level package manager\&. .RE .PP \fBDISTTYPE_DEB\fR .RS 4 -Used for systems with use dpkg as low level package manager\&. +Used for systems which use dpkg as low level package manager\&. .RE .PP \fBDISTTYPE_ARCH\fR .RS 4 -Used for systems with use the arch linux package manager\&. +Used for systems which use the arch linux package manager\&. .RE .PP \fBDISTTYPE_HAIKU\fR .RS 4 -Used for systems with use haiku packages\&. +Used for systems which use haiku packages\&. .RE .PP \fBPOOL_FLAG_PROMOTEEPOCH\fR @@ -355,7 +355,7 @@ If a package is installed in multiversionmode, rpm used to ignore both the impli .PP \fBPOOL_FLAG_ADDFILEPROVIDESFILTERED\fR .RS 4 -Make the addfileprovides method only add files from the standard locations (i\&.e\&. the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only few packages that use non\-standard file dependencies, but you still wand the fast speed that addfileprovides() generates\&. +Make the addfileprovides method only add files from the standard locations (i\&.e\&. 
the \(lqbin\(rq and \(lqetc\(rq directories)\&. This is useful if you have only few packages that use non\-standard file dependencies, but you still want the fast speed that addfileprovides() generates\&. .RE .SS "Functions" .sp @@ -585,7 +585,7 @@ A boolean OR operation, the \(lqname\(rq and \(lqevr\(rq parts of the relation c .PP \fBREL_WITH\fR .RS 4 -Like REL_AND, but packages mast match both dependencies simultaneously\&. See the section about boolean dependencies about more information\&. +Like REL_AND, but packages must match both dependencies simultaneously\&. See the section about boolean dependencies about more information\&. .RE .PP \fBREL_NAMESPACE\fR @@ -595,7 +595,7 @@ A special namespace relation\&. See the section about namespace dependencies for .PP \fBREL_ARCH\fR .RS 4 -A architecture filter dependency\&. The \(lqname\(rq part of the relation is a sub\-dependency, the \(lqevr\(rq part is the Id of an architecture that the matching packages must have (note that this is an exact match ignoring architecture policies)\&. +An architecture filter dependency\&. The \(lqname\(rq part of the relation is a sub\-dependency, the \(lqevr\(rq part is the Id of an architecture that the matching packages must have (note that this is an exact match ignoring architecture policies)\&. .RE .PP \fBREL_FILECONFLICT\fR @@ -792,7 +792,7 @@ Only compare the epoch and the version parts, ignore the release part\&. .RE .\} .sp -Compare two version Ids, return \-1 if the first version is less then the second version, 0 if they are identical, and 1 if the first version is bigger than the second one\&. +Compare two version Ids, return \-1 if the first version is less than the second version, 0 if they are identical, and 1 if the first version is bigger than the second one\&. 
.sp .if n \{\ .RS 4 @@ -853,7 +853,7 @@ Like pool_match_dep, but the provider is the "self\-provides" dependency of the .RE .\} .sp -Create a index that maps dependency Ids to sets of packages that provide the dependency\&. +Create an index that maps dependency Ids to sets of packages that provide the dependency\&. .sp .if n \{\ .RS 4 @@ -913,7 +913,7 @@ Add the contents of the Queue \fIq\fR to the end of the whatprovidesdata array, .RE .\} .sp -Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependency for file names and than scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&. +Some package managers like rpm allow dependencies on files contained in other packages\&. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories\&. This method will scan all dependency for file names and then scan all packages for matching files\&. If a filename has been matched, it will be added to the provides list of the corresponding package\&. .sp .if n \{\ .RS 4 @@ -1010,7 +1010,7 @@ Join three strings and return the result in the pool\(cqs temporary space area\& .RE .\} .sp -Like pool_tmpjoin(), but if the first argument is the last allocated space in the pool\(cqs temporary space area, it will be replaced with the result of the join and no new temporary space slot will be used\&. Thus you can join more then three strings by a combination of one pool_tmpjoin() and multiple pool_tmpappend() calls\&. Note that the \fIstr1\fR pointer is no longer usable after the call\&. 
+Like pool_tmpjoin(), but if the first argument is the last allocated space in the pool\(cqs temporary space area, it will be replaced with the result of the join and no new temporary space slot will be used\&. Thus you can join more than three strings by a combination of one pool_tmpjoin() and multiple pool_tmpappend() calls\&. Note that the \fIstr1\fR pointer is no longer usable after the call\&. .SH "DATA LOOKUP" .SS "Constants" .PP diff --git a/doc/libsolv-pool.txt b/doc/libsolv-pool.txt index 8567cbf..8ee0c18 100644 --- a/doc/libsolv-pool.txt +++ b/doc/libsolv-pool.txt @@ -188,16 +188,16 @@ Pool configuration === Constants === *DISTTYPE_RPM*:: -Used for systems with use rpm as low level package manager. +Used for systems which use rpm as low level package manager. *DISTTYPE_DEB*:: -Used for systems with use dpkg as low level package manager. +Used for systems which use dpkg as low level package manager. *DISTTYPE_ARCH*:: -Used for systems with use the arch linux package manager. +Used for systems which use the arch linux package manager. *DISTTYPE_HAIKU*:: -Used for systems with use haiku packages. +Used for systems which use haiku packages. *POOL_FLAG_PROMOTEEPOCH*:: Promote the epoch of the providing dependency to the requesting @@ -257,7 +257,7 @@ obsoleted packages still get removed. Make the addfileprovides method only add files from the standard locations (i.e. the ``bin'' and ``etc'' directories). This is useful if you have only few packages that use non-standard file -dependencies, but you still wand the fast speed that addfileprovides() +dependencies, but you still want the fast speed that addfileprovides() generates. @@ -383,7 +383,7 @@ A boolean OR operation, the ``name'' and ``evr'' parts of the relation can be two sub-dependencies. Packages can match any part of the dependency. *REL_WITH*:: -Like REL_AND, but packages mast match both dependencies simultaneously. See +Like REL_AND, but packages must match both dependencies simultaneously. 
See the section about boolean dependencies about more information. *REL_NAMESPACE*:: @@ -391,7 +391,7 @@ A special namespace relation. See the section about namespace dependencies for more information. *REL_ARCH*:: -A architecture filter dependency. The ``name'' part of the relation is a +An architecture filter dependency. The ``name'' part of the relation is a sub-dependency, the ``evr'' part is the Id of an architecture that the matching packages must have (note that this is an exact match ignoring architecture policies). @@ -508,7 +508,7 @@ Only compare the epoch and the version parts, ignore the release part. === Functions === int pool_evrcmp(const Pool *pool, Id evr1id, Id evr2id, int mode); -Compare two version Ids, return -1 if the first version is less then the +Compare two version Ids, return -1 if the first version is less than the second version, 0 if they are identical, and 1 if the first version is bigger than the second one. @@ -538,7 +538,7 @@ Whatprovides Index ------------------ void pool_createwhatprovides(Pool *pool); -Create a index that maps dependency Ids to sets of packages that provide the +Create an index that maps dependency Ids to sets of packages that provide the dependency. void pool_freewhatprovides(Pool *pool); @@ -569,7 +569,7 @@ Some package managers like rpm allow dependencies on files contained in other packages. To allow libsolv to deal with those dependencies in an efficient way, you need to call the addfileprovides method after creating and reading all repositories. This method will scan all dependency for file -names and than scan all packages for matching files. If a filename has been +names and then scan all packages for matching files. If a filename has been matched, it will be added to the provides list of the corresponding package. @@ -631,7 +631,7 @@ area. You can use NULL arguments if you just want to join less strings. 
Like pool_tmpjoin(), but if the first argument is the last allocated space in the pool's temporary space area, it will be replaced with the result of the join and no new temporary space slot will be used. Thus you can join -more then three strings by a combination of one pool_tmpjoin() and multiple +more than three strings by a combination of one pool_tmpjoin() and multiple pool_tmpappend() calls. Note that the _str1_ pointer is no longer usable after the call. diff --git a/ext/pool_fileconflicts.c b/ext/pool_fileconflicts.c index bfd07ef..4238d2d 100644 --- a/ext/pool_fileconflicts.c +++ b/ext/pool_fileconflicts.c @@ -806,6 +806,7 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in Repo *installed = pool->installed; Id p; int obsoleteusescolors = pool_get_flag(pool, POOL_FLAG_OBSOLETEUSESCOLORS); + int hdrfetches; queue_empty(conflicts); if (!pkgs->count) @@ -841,6 +842,7 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in /* first pass: scan dirs */ if (!cbdata.aliases) { + hdrfetches = 0; cflmapn = (cutoff + 3) * 64; while ((cflmapn & (cflmapn - 1)) != 0) cflmapn = cflmapn & (cflmapn - 1); @@ -867,12 +869,14 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in handle = (*handle_cb)(pool, p, handle_cbdata); if (!handle) continue; + hdrfetches++; rpm_iterate_filelist(handle, RPM_ITERATE_FILELIST_ONLYDIRS, finddirs_cb, &cbdata); if (MAPTST(&cbdata.idxmap, i)) idxmapset++; } POOL_DEBUG(SOLV_DEBUG_STATS, "dirmap size: %d, used %d\n", cbdata.dirmapn + 1, cbdata.dirmapused); POOL_DEBUG(SOLV_DEBUG_STATS, "dirmap memory usage: %d K\n", (cbdata.dirmapn + 1) * 2 * (int)sizeof(Id) / 1024); + POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches); POOL_DEBUG(SOLV_DEBUG_STATS, "dirmap creation took %d ms\n", solv_timems(now)); POOL_DEBUG(SOLV_DEBUG_STATS, "dir conflicts found: %d, idxmap %d of %d\n", cbdata.dirconflicts, idxmapset, pkgs->count); } @@ -885,6 +889,7 @@ 
pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in cbdata.cflmap = solv_calloc(cflmapn, 2 * sizeof(Id)); cbdata.cflmapn = cflmapn - 1; /* make it a mask */ cbdata.create = 1; + hdrfetches = 0; for (i = 0; i < pkgs->count; i++) { if (i == cutoff) @@ -904,12 +909,14 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in handle = (*handle_cb)(pool, p, handle_cbdata); if (!handle) continue; + hdrfetches++; cbdata.lastdiridx = -1; rpm_iterate_filelist(handle, RPM_ITERATE_FILELIST_NOGHOSTS, cbdata.aliases ? findfileconflicts_basename_cb : findfileconflicts_cb, &cbdata); } POOL_DEBUG(SOLV_DEBUG_STATS, "filemap size: %d, used %d\n", cbdata.cflmapn + 1, cbdata.cflmapused); POOL_DEBUG(SOLV_DEBUG_STATS, "filemap memory usage: %d K\n", (cbdata.cflmapn + 1) * 2 * (int)sizeof(Id) / 1024); + POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches); POOL_DEBUG(SOLV_DEBUG_STATS, "filemap creation took %d ms\n", solv_timems(now)); POOL_DEBUG(SOLV_DEBUG_STATS, "lookat_dir size: %d\n", cbdata.lookat_dir.count); queue_free(&cbdata.lookat_dir); @@ -931,6 +938,7 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in cbdata.statmapn = cflmapn - 1; /* make it a mask */ } cbdata.create = 0; + hdrfetches = 0; for (i = 0; i < pkgs->count; i++) { if (!MAPTST(&cbdata.idxmap, i)) @@ -942,11 +950,13 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in handle = (*handle_cb)(pool, p, handle_cbdata); if (!handle) continue; + hdrfetches++; cbdata.lastdiridx = -1; rpm_iterate_filelist(handle, RPM_ITERATE_FILELIST_NOGHOSTS, findfileconflicts_alias_cb, &cbdata); } POOL_DEBUG(SOLV_DEBUG_STATS, "normap size: %d, used %d\n", cbdata.normapn + 1, cbdata.normapused); POOL_DEBUG(SOLV_DEBUG_STATS, "normap memory usage: %d K\n", (cbdata.normapn + 1) * 2 * (int)sizeof(Id) / 1024); + POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches); POOL_DEBUG(SOLV_DEBUG_STATS, "stats made: 
%d\n", cbdata.statsmade); if (cbdata.usestat) { @@ -967,11 +977,10 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in cbdata.cflmapn = 0; cbdata.cflmapused = 0; - now = solv_timems(0); - map_free(&cbdata.idxmap); /* sort and unify/prune */ + now = solv_timems(0); POOL_DEBUG(SOLV_DEBUG_STATS, "raw candidates: %d, pruning\n", cbdata.lookat.count / 4); solv_sort(cbdata.lookat.elements, cbdata.lookat.count / 4, sizeof(Id) * 4, &lookat_hx_cmp, pool); for (i = j = 0; i < cbdata.lookat.count; ) @@ -1006,10 +1015,13 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in } queue_truncate(&cbdata.lookat, j); POOL_DEBUG(SOLV_DEBUG_STATS, "candidates now: %d\n", cbdata.lookat.count / 4); + POOL_DEBUG(SOLV_DEBUG_STATS, "pruning took %d ms\n", solv_timems(now)); /* third pass: collect file info for all files that match a hx */ + now = solv_timems(0); solv_sort(cbdata.lookat.elements, cbdata.lookat.count / 4, sizeof(Id) * 4, &lookat_idx_cmp, pool); queue_init(&cbdata.files); + hdrfetches = 0; for (i = 0; i < cbdata.lookat.count; i += 4) { Id idx = cbdata.lookat.elements[i + 1]; @@ -1018,6 +1030,8 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in iterflags |= RPM_ITERATE_FILELIST_WITHCOL; p = pkgs->elements[idx]; handle = (*handle_cb)(pool, p, handle_cbdata); + if (handle) + hdrfetches++; for (;; i += 4) { int fstart = cbdata.files.count; @@ -1036,11 +1050,14 @@ pool_findfileconflicts(Pool *pool, Queue *pkgs, int cutoff, Queue *conflicts, in break; } } + POOL_DEBUG(SOLV_DEBUG_STATS, "header fetches: %d\n", hdrfetches); + POOL_DEBUG(SOLV_DEBUG_STATS, "file info fetching took %d ms\n", solv_timems(now)); cbdata.normap = solv_free(cbdata.normap); cbdata.normapn = 0; /* forth pass: for each hx we have, compare all matching files against all other matching files */ + now = solv_timems(0); solv_sort(cbdata.lookat.elements, cbdata.lookat.count / 4, sizeof(Id) * 4, &lookat_hx_cmp, pool); for (i 
= 0; i < cbdata.lookat.count - 4; i += 4) { diff --git a/ext/repo_appdata.c b/ext/repo_appdata.c index e987967..cbc42e4 100644 --- a/ext/repo_appdata.c +++ b/ext/repo_appdata.c @@ -49,6 +49,7 @@ enum state { STATE_GROUP, STATE_KEYWORDS, STATE_KEYWORD, + STATE_EXTENDS, NUMSTATES }; @@ -74,6 +75,7 @@ static struct stateswitch stateswitches[] = { { STATE_APPLICATION, "url", STATE_URL, 1 }, { STATE_APPLICATION, "project_group", STATE_GROUP, 1 }, { STATE_APPLICATION, "keywords", STATE_KEYWORDS, 0 }, + { STATE_APPLICATION, "extends", STATE_EXTENDS, 1 }, { STATE_DESCRIPTION, "p", STATE_P, 1 }, { STATE_DESCRIPTION, "ul", STATE_UL, 0 }, { STATE_DESCRIPTION, "ol", STATE_OL, 0 }, @@ -107,6 +109,8 @@ struct parsedata { int flags; char *desktop_file; int havesummary; + const char *filename; + Queue *owners; }; @@ -127,6 +131,7 @@ startElement(void *userData, const char *name, const char **atts) Pool *pool = pd->pool; Solvable *s = pd->solvable; struct stateswitch *sw; + const char *type; #if 0 fprintf(stderr, "start: [%d]%s\n", pd->state, name); @@ -176,6 +181,10 @@ startElement(void *userData, const char *name, const char **atts) s = pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->repo)); pd->handle = s - pool->solvables; pd->havesummary = 0; + type = find_attr("type", atts); + if (!type || !*type) + type = "desktop"; + repodata_set_poolstr(pd->data, pd->handle, SOLVABLE_CATEGORY, type); break; case STATE_DESCRIPTION: pd->description = solv_free(pd->description); @@ -311,6 +320,26 @@ add_missing_tags_from_desktop_file(struct parsedata *pd, Solvable *s, const char fclose(fp); } +static char * +guess_filename_from_id(Pool *pool, const char *id) +{ + int l = strlen(id); + char *r = pool_tmpjoin(pool, id, ".metainfo.xml", 0); + if (l > 8 && !strcmp(".desktop", id + l - 8)) + strcpy(r + l - 8, ".appdata.xml"); + else if (l > 4 && !strcmp(".ttf", id + l - 4)) + strcpy(r + l - 4, ".metainfo.xml"); + else if (l > 4 && !strcmp(".otf", id + l - 4)) + strcpy(r + l - 4, 
".metainfo.xml"); + else if (l > 4 && !strcmp(".xml", id + l - 4)) + strcpy(r + l - 4, ".metainfo.xml"); + else if (l > 3 && !strcmp(".db", id + l - 3)) + strcpy(r + l - 3, ".metainfo.xml"); + else + return 0; + return r; +} + static void XMLCALL endElement(void *userData, const char *name) { @@ -361,6 +390,31 @@ endElement(void *userData, const char *name) l -= 8; s->name = pool_strn2id(pool, name, l, 1); } + if (!s->requires && pd->owners) + { + int i; + Id id; + for (i = 0; i < pd->owners->count; i++) + { + Solvable *os = pd->pool->solvables + pd->owners->elements[i]; + s->requires = repo_addid_dep(pd->repo, s->requires, os->name, 0); + id = pool_str2id(pd->pool, pool_tmpjoin(pd->pool, "application-appdata(", pool_id2str(pd->pool, os->name), ")"), 1); + s->provides = repo_addid_dep(pd->repo, s->provides, id, 0); + } + } + if (!s->requires && (pd->desktop_file || pd->filename)) + { + /* add appdata() link requires/provides */ + const char *filename = pd->filename; + if (!filename) + filename = guess_filename_from_id(pool, pd->desktop_file); + if (filename) + { + filename = pool_tmpjoin(pool, "application-appdata(", filename, ")"); + s->requires = repo_addid_dep(pd->repo, s->requires, pool_str2id(pd->pool, filename + 12, 1), 0); + s->provides = repo_addid_dep(pd->repo, s->provides, pool_str2id(pd->pool, filename, 1), 0); + } + } if (s->name && s->arch != ARCH_SRC && s->arch != ARCH_NOSRC) s->provides = repo_addid_dep(pd->repo, s->provides, pool_rel2id(pd->pool, s->name, s->evr, REL_EQ, 1), 0); pd->solvable = 0; @@ -368,21 +422,6 @@ endElement(void *userData, const char *name) break; case STATE_ID: pd->desktop_file = solv_strdup(pd->content); - /* guess the appdata.xml file name from the id element */ - if (pd->lcontent > 8 && !strcmp(".desktop", pd->content + pd->lcontent - 8)) - pd->content[pd->lcontent - 8] = 0; - else if (pd->lcontent > 4 && !strcmp(".ttf", pd->content + pd->lcontent - 4)) - pd->content[pd->lcontent - 4] = 0; - else if (pd->lcontent > 4 && 
!strcmp(".otf", pd->content + pd->lcontent - 4)) - pd->content[pd->lcontent - 4] = 0; - else if (pd->lcontent > 4 && !strcmp(".xml", pd->content + pd->lcontent - 4)) - pd->content[pd->lcontent - 4] = 0; - else if (pd->lcontent > 3 && !strcmp(".db", pd->content + pd->lcontent - 3)) - pd->content[pd->lcontent - 3] = 0; - id = pool_str2id(pd->pool, pool_tmpjoin(pool, "appdata(", pd->content, ".appdata.xml)"), 1); - s->requires = repo_addid_dep(pd->repo, s->requires, id, 0); - id = pool_str2id(pd->pool, pool_tmpjoin(pool, "application-appdata(", pd->content, ".appdata.xml)"), 1); - s->provides = repo_addid_dep(pd->repo, s->provides, id, 0); break; case STATE_NAME: s->name = pool_str2id(pd->pool, pool_tmpjoin(pool, "application:", pd->content, 0), 1); @@ -400,6 +439,9 @@ endElement(void *userData, const char *name) case STATE_GROUP: repodata_add_poolstr_array(pd->data, pd->handle, SOLVABLE_GROUP, pd->content); break; + case STATE_EXTENDS: + repodata_add_poolstr_array(pd->data, pd->handle, SOLVABLE_EXTENDS, pd->content); + break; case STATE_DESCRIPTION: if (pd->description) { @@ -436,6 +478,8 @@ endElement(void *userData, const char *name) case STATE_PKGNAME: id = pool_str2id(pd->pool, pd->content, 1); s->requires = repo_addid_dep(pd->repo, s->requires, id, 0); + id = pool_str2id(pd->pool, pool_tmpjoin(pd->pool, "application-appdata(", pd->content, ")"), 1); + s->provides = repo_addid_dep(pd->repo, s->provides, id, 0); break; case STATE_KEYWORD: repodata_add_poolstr_array(pd->data, pd->handle, SOLVABLE_KEYWORDS, pd->content); @@ -476,8 +520,8 @@ characterData(void *userData, const XML_Char *s, int len) #define BUFF_SIZE 8192 -int -repo_add_appdata(Repo *repo, FILE *fp, int flags) +static int +repo_add_appdata_fn(Repo *repo, FILE *fp, int flags, const char *filename, Queue *owners) { Pool *pool = repo->pool; struct parsedata pd; @@ -493,6 +537,8 @@ repo_add_appdata(Repo *repo, FILE *fp, int flags) pd.pool = repo->pool; pd.data = data; pd.flags = flags; + pd.filename = 
filename; + pd.owners = owners; pd.content = malloc(256); pd.acontent = 256; @@ -537,6 +583,60 @@ repo_add_appdata(Repo *repo, FILE *fp, int flags) return ret; } +int +repo_add_appdata(Repo *repo, FILE *fp, int flags) +{ + return repo_add_appdata_fn(repo, fp, flags, 0, 0); +} + +static void +search_uninternalized_filelist(Repo *repo, const char *dir, Queue *res) +{ + Pool *pool = repo->pool; + Id rdid, p; + Id iter, did, idid; + + for (rdid = 1; rdid < repo->nrepodata; rdid++) + { + Repodata *data = repo_id2repodata(repo, rdid); + if (!data) + continue; + if (data->state == REPODATA_STUB) + continue; + if (!repodata_has_keyname(data, SOLVABLE_FILELIST)) + continue; + did = repodata_str2dir(data, dir, 0); + if (!did) + continue; + for (p = data->start; p < data->end; p++) + { + if (p >= pool->nsolvables) + continue; + if (pool->solvables[p].repo != repo) + continue; + iter = 0; + for (;;) + { + const char *str; + int l; + Id id; + idid = did; + str = repodata_lookup_dirstrarray_uninternalized(data, p, SOLVABLE_FILELIST, &idid, &iter); + if (!iter) + break; + l = strlen(str); + if (l > 12 && strncmp(str + l - 12, ".appdata.xml", 12)) + id = pool_str2id(pool, str, 1); + else if (l > 13 && strncmp(str + l - 13, ".metainfo.xml", 13)) + id = pool_str2id(pool, str, 1); + else + continue; + queue_push2(res, p, id); + } + } + } +} + /* add all files ending in .appdata.xml */ int repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags) @@ -544,7 +644,13 @@ repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags) DIR *dir; char *dirpath; Repodata *data; + Queue flq; + Queue oq; + queue_init(&flq); + queue_init(&oq); + if (flags & APPDATA_SEARCH_UNINTERNALIZED_FILELIST) + search_uninternalized_filelist(repo, appdatadir, &flq); data = repo_add_repodata(repo, flags); if (flags & REPO_USE_ROOTDIR) dirpath = pool_prepend_rootdir(repo->pool, appdatadir); @@ -558,10 +664,11 @@ repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags) const char *n; 
FILE *fp; int len = strlen(entry->d_name); - if (len <= 12 || strcmp(entry->d_name + len - 12, ".appdata.xml") != 0) - continue; if (entry->d_name[0] == '.') continue; + if (!(len > 12 && !strcmp(entry->d_name + len - 12, ".appdata.xml")) && + !(len > 13 && !strcmp(entry->d_name + len - 13, ".metainfo.xml"))) + continue; n = pool_tmpjoin(repo->pool, dirpath, "/", entry->d_name); fp = fopen(n, "r"); if (!fp) @@ -569,7 +676,19 @@ repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags) pool_error(repo->pool, 0, "%s: %s", n, strerror(errno)); continue; } - repo_add_appdata(repo, fp, flags | REPO_NO_INTERNALIZE | REPO_REUSE_REPODATA | APPDATA_CHECK_DESKTOP_FILE); + if (flags & APPDATA_SEARCH_UNINTERNALIZED_FILELIST) + { + Id id = pool_str2id(repo->pool, entry->d_name, 0); + queue_empty(&oq); + if (id) + { + int i; + for (i = 0; i < flq.count; i += 2) + if (flq.elements[i + 1] == id) + queue_push(&oq, flq.elements[i]); + } + } + repo_add_appdata_fn(repo, fp, flags | REPO_NO_INTERNALIZE | REPO_REUSE_REPODATA | APPDATA_CHECK_DESKTOP_FILE, entry->d_name, oq.count ? 
&oq : 0); fclose(fp); } closedir(dir); @@ -577,5 +696,7 @@ repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags) solv_free(dirpath); if (!(flags & REPO_NO_INTERNALIZE)) repodata_internalize(data); + queue_free(&oq); + queue_free(&flq); return 0; } diff --git a/ext/repo_appdata.h b/ext/repo_appdata.h index 51b35b5..db5f2cf 100644 --- a/ext/repo_appdata.h +++ b/ext/repo_appdata.h @@ -8,4 +8,5 @@ int repo_add_appdata(Repo *repo, FILE *fp, int flags); int repo_add_appdata_dir(Repo *repo, const char *appdatadir, int flags); +#define APPDATA_SEARCH_UNINTERNALIZED_FILELIST (1 << 8) #define APPDATA_CHECK_DESKTOP_FILE (1 << 30) /* internal */ diff --git a/ext/repo_content.c b/ext/repo_content.c index e94ca9e..0cd1293 100644 --- a/ext/repo_content.c +++ b/ext/repo_content.c @@ -142,7 +142,7 @@ adddep(Pool *pool, struct parsedata *pd, unsigned int olddeps, char *line, Id ma if (!rel || !evr) { - pool_debug(pool, SOLV_FATAL, "repo_content: bad relation '%s %s'\n", name, rel); + pool_debug(pool, SOLV_ERROR, "repo_content: bad relation '%s %s'\n", name, rel); continue; } for (flags = 0; flags < 6; flags++) @@ -150,7 +150,7 @@ adddep(Pool *pool, struct parsedata *pd, unsigned int olddeps, char *line, Id ma break; if (flags == 6) { - pool_debug(pool, SOLV_FATAL, "repo_content: unknown relation '%s'\n", rel); + pool_debug(pool, SOLV_ERROR, "repo_content: unknown relation '%s'\n", rel); continue; } id = pool_rel2id(pool, id, pool_str2id(pool, evr, 1), flags + 1, 1); @@ -518,7 +518,7 @@ repo_add_content(Repo *repo, FILE *fp, int flags) if (s && !s->name) { - pool_debug(pool, SOLV_FATAL, "repo_content: 'content' incomplete, no product solvable created!\n"); + pool_debug(pool, SOLV_ERROR, "repo_content: 'content' incomplete, no product solvable created!\n"); repo_free_solvable(repo, s - pool->solvables, 1); s = 0; } diff --git a/ext/repo_helix.c b/ext/repo_helix.c index 95dac35..f495be7 100644 --- a/ext/repo_helix.c +++ b/ext/repo_helix.c @@ -632,7 +632,7 @@ 
endElement(void *userData, const char *name) s->evr = evr2id(pool, pd, pd->epoch ? pd->evrspace + pd->epoch : 0, pd->version ? pd->evrspace + pd->version : 0, - pd->release ? pd->evrspace + pd->release : ""); + pd->release ? pd->evrspace + pd->release : 0); /* ensure self-provides */ if (s->name && s->arch != ARCH_SRC && s->arch != ARCH_NOSRC) s->provides = repo_addid_dep(pd->repo, s->provides, pool_rel2id(pool, s->name, s->evr, REL_EQ, 1), 0); diff --git a/ext/testcase.c b/ext/testcase.c index 3c40451..b9fddef 100644 --- a/ext/testcase.c +++ b/ext/testcase.c @@ -164,7 +164,7 @@ static struct selflags2str { }; static const char *features[] = { -#ifdef ENABLE_LINKED_PACKAGES +#ifdef ENABLE_LINKED_PKGS "linked_packages", #endif #ifdef ENABLE_COMPLEX_DEPS diff --git a/package/libsolv.changes b/package/libsolv.changes index 745e5bf..8714d4a 100644 --- a/package/libsolv.changes +++ b/package/libsolv.changes @@ -1,4 +1,15 @@ ------------------------------------------------------------------- +Mon Dec 14 15:48:01 CET 2015 - mls@suse.de + +- change product links to also look at timestamps [bnc#956443] +- rework multiversion orphaned handling [bnc#957606] +- support key type changes in repodata_internalize() +- allow serialization of REPOKEY_TYPE_DELETED +- improve appdata handling of installed packages +- improve performance when run under xen +- bump version to 0.6.15 + +------------------------------------------------------------------- Mon Oct 5 13:27:25 CEST 2015 - mls@suse.de - fix bug in recommends handling [bnc#948482] diff --git a/src/libsolv.ver b/src/libsolv.ver index 9e47117..6508288 100644 --- a/src/libsolv.ver +++ b/src/libsolv.ver @@ -202,6 +202,7 @@ SOLV_1.0 { repodata_localize_id; repodata_lookup_bin_checksum; repodata_lookup_binary; + repodata_lookup_dirstrarray_uninternalized; repodata_lookup_id; repodata_lookup_id_uninternalized; repodata_lookup_idarray; @@ -262,6 +263,7 @@ SOLV_1.0 { solv_depmarker; solv_dupappend; solv_dupjoin; + solv_extend_realloc; 
solv_free; solv_hex2bin; solv_latin1toutf8; diff --git a/src/linkedpkg.c b/src/linkedpkg.c index c5adc9a..6387373 100644 --- a/src/linkedpkg.c +++ b/src/linkedpkg.c @@ -21,7 +21,7 @@ * * product: * created from product data in the repository (which is generated from files - * in /etc/products.d. In the future we may switch to using product() + * in /etc/products.d). In the future we may switch to using product() * provides of packages. * * pattern: @@ -37,6 +37,7 @@ #include "pool.h" #include "repo.h" +#include "evr.h" #include "linkedpkg.h" #ifdef ENABLE_LINKED_PKGS @@ -47,12 +48,11 @@ find_application_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp Id req = 0; Id prv = 0; Id p, pp; - Id pkgname = 0; + Id pkgname = 0, appdataid = 0; /* find appdata requires */ if (s->requires) { - Id appdataid = 0; Id *reqp = s->repo->idarraydata + s->requires; while ((req = *reqp++) != 0) /* go through all requires */ { @@ -63,22 +63,34 @@ find_application_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp else pkgname = req; } - req = appdataid; } + req = appdataid ? 
appdataid : pkgname; if (!req) return; /* find application-appdata provides */ if (s->provides) { Id *prvp = s->repo->idarraydata + s->provides; + const char *reqs = pool_id2str(pool, req); + const char *prvs; while ((prv = *prvp++) != 0) /* go through all provides */ { if (ISRELDEP(prv)) continue; - if (strncmp("application-appdata(", pool_id2str(pool, prv), 20)) + prvs = pool_id2str(pool, prv); + if (strncmp("application-appdata(", prvs, 20)) continue; - if (!strcmp(pool_id2str(pool, prv) + 12, pool_id2str(pool, req))) - break; + if (appdataid) + { + if (!strcmp(prvs + 12, reqs)) + break; + } + else + { + int reqsl = strlen(reqs); + if (!strncmp(prvs + 20, reqs, reqsl) && !strcmp(prvs + 20 + reqsl, ")")) + break; + } } } if (!prv) @@ -88,7 +100,7 @@ find_application_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp if (pool->solvables[p].repo == s->repo) if (!pkgname || pool->solvables[p].name == pkgname) queue_push(qr, p); - if (!qr->count && pkgname) + if (!qr->count && pkgname && appdataid) { /* huh, no matching package? try without pkgname filter */ FOR_PROVIDES(p, pp, req) @@ -112,6 +124,7 @@ find_product_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu { Id p, pp, namerelid; char *str; + unsigned int sbt = 0; /* search for project requires */ namerelid = 0; @@ -149,6 +162,29 @@ find_product_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu continue; queue_push(qr, p); } + if (qr->count > 1) + { + /* multiple providers. 
try buildtime filter */ + sbt = solvable_lookup_num(s, SOLVABLE_BUILDTIME, 0); + if (sbt) + { + unsigned int bt; + int i, j; + int filterqp = 1; + for (i = j = 0; i < qr->count; i++) + { + bt = solvable_lookup_num(pool->solvables + qr->elements[i], SOLVABLE_BUILDTIME, 0); + if (!bt) + filterqp = 0; /* can't filter */ + if (!bt || bt == sbt) + qr->elements[j++] = qr->elements[i]; + } + if (j) + qr->count = j; + if (!j || !filterqp) + sbt = 0; /* filter failed */ + } + } if (!qr->count && s->repo == pool->installed) { /* oh no! Look up reference file */ @@ -174,6 +210,8 @@ find_product_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu Solvable *ps = pool->solvables + p; if (s->name != ps->name || ps->repo != s->repo || ps->arch != s->arch || s->evr != ps->evr) continue; + if (sbt && solvable_lookup_num(ps, SOLVABLE_BUILDTIME, 0) != sbt) + continue; queue_push(qp, p); } } @@ -272,4 +310,72 @@ find_package_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Qu find_product_link(pool, s, reqidp, qr, prvidp, qp); } +static int +name_min_max(Pool *pool, Solvable *s, Id *namep, Id *minp, Id *maxp) +{ + Queue q; + Id qbuf[4]; + Id name, min, max; + int i; + + queue_init_buffer(&q, qbuf, sizeof(qbuf)/sizeof(*qbuf)); + find_package_link(pool, s, 0, &q, 0, 0); + if (!q.count) + { + queue_free(&q); + return 0; + } + s = pool->solvables + q.elements[0]; + name = s->name; + min = max = s->evr; + for (i = 1; i < q.count; i++) + { + s = pool->solvables + q.elements[i]; + if (s->name != name) + { + queue_free(&q); + return 0; + } + if (s->evr == min || s->evr == max) + continue; + if (pool_evrcmp(pool, min, s->evr, EVRCMP_COMPARE) >= 0) + min = s->evr; + else if (min == max || pool_evrcmp(pool, max, s->evr, EVRCMP_COMPARE) <= 0) + max = s->evr; + } + queue_free(&q); + *namep = name; + *minp = min; + *maxp = max; + return 1; +} + +int +pool_link_evrcmp(Pool *pool, Solvable *s1, Solvable *s2) +{ + Id name1, evrmin1, evrmax1; + Id name2, evrmin2, evrmax2; 
+ + if (s1->name != s2->name) + return 0; /* can't compare */ + if (!name_min_max(pool, s1, &name1, &evrmin1, &evrmax1)) + return 0; + if (!name_min_max(pool, s2, &name2, &evrmin2, &evrmax2)) + return 0; + /* compare linked names */ + if (name1 != name2) + return 0; + if (evrmin1 == evrmin2 && evrmax1 == evrmax2) + return 0; + /* now compare evr intervals */ + if (evrmin1 == evrmax1 && evrmin2 == evrmax2) + return pool_evrcmp(pool, evrmin1, evrmax2, EVRCMP_COMPARE); + if (evrmin1 != evrmax2 && pool_evrcmp(pool, evrmin1, evrmax2, EVRCMP_COMPARE) > 0) + return 1; + if (evrmax1 != evrmin2 && pool_evrcmp(pool, evrmax1, evrmin2, EVRCMP_COMPARE) < 0) + return -1; + return 0; +} + + #endif diff --git a/src/linkedpkg.h b/src/linkedpkg.h index 25894c9..4463280 100644 --- a/src/linkedpkg.h +++ b/src/linkedpkg.h @@ -34,5 +34,6 @@ extern Id find_autoproduct_name(Pool *pool, Solvable *s); /* generic */ extern void find_package_link(Pool *pool, Solvable *s, Id *reqidp, Queue *qr, Id *prvidp, Queue *qp); +extern int pool_link_evrcmp(Pool *pool, Solvable *s1, Solvable *s2); #endif diff --git a/src/policy.c b/src/policy.c index fadfcda..12ad771 100644 --- a/src/policy.c +++ b/src/policy.c @@ -21,9 +21,11 @@ #include "policy.h" #include "poolvendor.h" #include "poolarch.h" +#include "linkedpkg.h" #include "cplxdeps.h" + /*-----------------------------------------------------------------*/ /* @@ -825,7 +827,7 @@ move_installed_to_front(Pool *pool, Queue *plist) void prune_to_best_version(Pool *pool, Queue *plist) { - int i, j; + int i, j, r; Solvable *s, *best; if (plist->count < 2) /* no need to prune for a single entry */ @@ -858,12 +860,13 @@ prune_to_best_version(Pool *pool, Queue *plist) best = s; /* take current as new best */ continue; } - - if (best->evr != s->evr) /* compare evr */ - { - if (pool_evrcmp(pool, best->evr, s->evr, EVRCMP_COMPARE) < 0) - best = s; - } + r = best->evr != s->evr ? 
pool_evrcmp(pool, best->evr, s->evr, EVRCMP_COMPARE) : 0; +#ifdef ENABLE_LINKED_PKGS + if (r == 0 && has_package_link(pool, s)) + r = pool_link_evrcmp(pool, best, s); +#endif + if (r < 0) + best = s; } plist->elements[j++] = best - pool->solvables; /* finish last group */ plist->count = j; @@ -82,7 +82,7 @@ pool_create(void) s->evr = ID_EMPTY; pool->debugmask = SOLV_DEBUG_RESULT; /* FIXME */ -#ifdef FEDORA +#if defined(FEDORA) || defined(MAGEIA) pool->implicitobsoleteusescolors = 1; #endif #ifdef RPM5 diff --git a/src/poolarch.c b/src/poolarch.c index 9408983..788646b 100644 --- a/src/poolarch.c +++ b/src/poolarch.c @@ -21,7 +21,7 @@ #include "util.h" static const char *archpolicies[] = { -#ifdef FEDORA +#if defined(FEDORA) || defined(MAGEIA) "x86_64", "x86_64:athlon:i686:i586:i486:i386", #else "x86_64", "x86_64:i686:i586:i486:i386", @@ -64,7 +64,7 @@ static const char *archpolicies[] = { "mips64", "mips64", "mips64el", "mips64el", "m68k", "m68k", -#ifdef FEDORA +#if defined(FEDORA) || defined(MAGEIA) "ia32e", "ia32e:x86_64:athlon:i686:i586:i486:i386", "athlon", "athlon:i686:i586:i486:i386", "amd64", "amd64:x86_64:athlon:i686:i586:i486:i386", diff --git a/src/repodata.c b/src/repodata.c index c854262..ad3e71a 100644 --- a/src/repodata.c +++ b/src/repodata.c @@ -849,6 +849,37 @@ repodata_lookup_id_uninternalized(Repodata *data, Id solvid, Id keyname, Id void return 0; } +const char * +repodata_lookup_dirstrarray_uninternalized(Repodata *data, Id solvid, Id keyname, Id *didp, Id *iterp) +{ + Id *ap, did; + Id iter = *iterp; + if (iter == 0) /* find key data */ + { + if (!data->attrs) + return 0; + ap = data->attrs[solvid - data->start]; + if (!ap) + return 0; + for (; *ap; ap += 2) + if (data->keys[*ap].name == keyname && data->keys[*ap].type == REPOKEY_TYPE_DIRSTRARRAY) + break; + if (!*ap) + return 0; + iter = ap[1]; + } + did = *didp; + for (ap = data->attriddata + iter; *ap; ap += 2) + { + if (did && ap[0] != did) + continue; + *didp = ap[0]; + *iterp = ap - 
data->attriddata + 2; + return (const char *)data->attrdata + ap[1]; + } + *iterp = 0; + return 0; +} /************************************************************************ * data search @@ -3024,6 +3055,7 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore, case REPOKEY_TYPE_VOID: case REPOKEY_TYPE_CONSTANT: case REPOKEY_TYPE_CONSTANTID: + case REPOKEY_TYPE_DELETED: break; case REPOKEY_TYPE_STR: data_addblob(xd, data->attrdata + val, strlen((char *)(data->attrdata + val)) + 1); @@ -3094,29 +3126,30 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore, sp = schema; kp = data->xattrs[-*ida]; if (!kp) - continue; + continue; /* ignore empty elements */ num++; - for (;*kp; kp += 2) + for (; *kp; kp += 2) *sp++ = *kp; *sp = 0; if (!schemaid) schemaid = repodata_schema2id(data, schema, 1); else if (schemaid != repodata_schema2id(data, schema, 0)) { - pool_debug(data->repo->pool, SOLV_FATAL, "fixarray substructs with different schemas\n"); - exit(1); + pool_debug(data->repo->pool, SOLV_ERROR, "repodata_serialize_key: fixarray substructs with different schemas\n"); + num = 0; + break; } } + data_addid(xd, num); if (!num) break; - data_addid(xd, num); data_addid(xd, schemaid); for (ida = data->attriddata + val; *ida; ida++) { Id *kp = data->xattrs[-*ida]; if (!kp) continue; - for (;*kp; kp += 2) + for (; *kp; kp += 2) repodata_serialize_key(data, newincore, newvincore, schema, data->keys + *kp, kp[1]); } break; @@ -3148,7 +3181,7 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore, break; } default: - pool_debug(data->repo->pool, SOLV_FATAL, "don't know how to handle type %d\n", key->type); + pool_debug(data->repo->pool, SOLV_FATAL, "repodata_serialize_key: don't know how to handle type %d\n", key->type); exit(1); } if (key->storage == KEY_STORAGE_VERTICAL_OFFSET) @@ -3160,18 +3193,57 @@ repodata_serialize_key(Repodata *data, struct extdata *newincore, } } +/* create a circular linked list of all keys that share + * the 
same keyname */ +static Id * +calculate_keylink(Repodata *data) +{ + int i, j; + Id *link; + Id maxkeyname = 0, *keytable = 0; + link = solv_calloc(data->nkeys, sizeof(Id)); + if (data->nkeys <= 2) + return link; + for (i = 1; i < data->nkeys; i++) + { + Id n = data->keys[i].name; + if (n >= maxkeyname) + { + keytable = solv_realloc2(keytable, n + 128, sizeof(Id)); + memset(keytable + maxkeyname, 0, (n + 128 - maxkeyname) * sizeof(Id)); + maxkeyname = n + 128; + } + j = keytable[n]; + if (j) + link[i] = link[j]; + else + j = i; + link[j] = i; + keytable[n] = i; + } + /* remove links that just point to themselves */ + for (i = 1; i < data->nkeys; i++) + if (link[i] == i) + link[i] = 0; + solv_free(keytable); + return link; +} + void repodata_internalize(Repodata *data) { Repokey *key, solvkey; - Id schemaid, keyid, *schema, *sp, oldschema, *keyp, *seen; + Id schemaid, keyid, *schema, *sp, oldschemaid, *keyp, *seen; + Offset *oldincoreoffs = 0; int schemaidx; unsigned char *dp, *ndp; - int newschema, oldcount; + int neednewschema; struct extdata newincore; struct extdata newvincore; Id solvkeyid; + Id *keylink; + int haveoldkl; if (!data->attrs && !data->xattrs) return; @@ -3204,140 +3276,181 @@ repodata_internalize(Repodata *data) data->mainschema = 0; data->mainschemaoffsets = solv_free(data->mainschemaoffsets); + keylink = calculate_keylink(data); /* join entry data */ /* we start with the meta data, entry -1 */ for (entry = -1; entry < nentry; entry++) { - memset(seen, 0, data->nkeys * sizeof(Id)); - oldschema = 0; + oldschemaid = 0; dp = data->incoredata; if (dp) { dp += entry >= 0 ? 
data->incoreoffset[entry] : 1; - dp = data_read_id(dp, &oldschema); + dp = data_read_id(dp, &oldschemaid); } + memset(seen, 0, data->nkeys * sizeof(Id)); #if 0 -fprintf(stderr, "oldschema %d\n", oldschema); -fprintf(stderr, "schemata %d\n", data->schemata[oldschema]); +fprintf(stderr, "oldschemaid %d\n", oldschemaid); +fprintf(stderr, "schemata %d\n", data->schemata[oldschemaid]); fprintf(stderr, "schemadata %p\n", data->schemadata); #endif - /* seen: -1: old data 0: skipped >0: id + 1 */ - newschema = 0; - oldcount = 0; + + /* seen: -1: old data, 0: skipped, >0: id + 1 */ + neednewschema = 0; sp = schema; - for (keyp = data->schemadata + data->schemata[oldschema]; *keyp; keyp++) + haveoldkl = 0; + for (keyp = data->schemadata + data->schemata[oldschemaid]; *keyp; keyp++) { if (seen[*keyp]) { - pool_debug(data->repo->pool, SOLV_FATAL, "Inconsistent old data (key occured twice).\n"); - exit(1); + /* oops, should not happen */ + neednewschema = 1; + continue; } - seen[*keyp] = -1; + seen[*keyp] = -1; /* use old marker */ *sp++ = *keyp; - oldcount++; + if (keylink[*keyp]) + haveoldkl = 1; /* potential keylink conflict */ } - if (entry >= 0) - keyp = data->attrs ? data->attrs[entry] : 0; - else + + /* strip solvables key */ + if (entry < 0 && solvkeyid && seen[solvkeyid]) { - /* strip solvables key */ *sp = 0; for (sp = keyp = schema; *sp; sp++) if (*sp != solvkeyid) *keyp++ = *sp; - else - oldcount--; sp = keyp; seen[solvkeyid] = 0; - keyp = data->xattrs ? data->xattrs[1] : 0; + neednewschema = 1; } + + /* add new entries */ + if (entry >= 0) + keyp = data->attrs ? data->attrs[entry] : 0; + else + keyp = data->xattrs ? 
data->xattrs[1] : 0; if (keyp) for (; *keyp; keyp += 2) { if (!seen[*keyp]) { - newschema = 1; + neednewschema = 1; *sp++ = *keyp; + if (haveoldkl && keylink[*keyp]) /* this should be pretty rare */ + { + Id kl; + for (kl = keylink[*keyp]; kl != *keyp; kl = keylink[kl]) + if (seen[kl] == -1) + { + /* replacing old key kl, remove from schema and seen */ + Id *osp; + for (osp = schema; osp < sp; osp++) + if (*osp == kl) + { + memmove(osp, osp + 1, (sp - osp) * sizeof(Id)); + sp--; + seen[kl] = 0; + break; + } + } + } } seen[*keyp] = keyp[1] + 1; } + + /* add solvables key if needed */ if (entry < 0 && data->end != data->start) { - *sp++ = solvkeyid; - newschema = 1; + *sp++ = solvkeyid; /* always last in schema */ + neednewschema = 1; } + + /* commit schema */ *sp = 0; - if (newschema) + if (neednewschema) /* Ideally we'd like to sort the new schema here, to ensure - schema equality independend of the ordering. We can't do that - yet. For once see below (old ids need to come before new ids). - An additional difficulty is that we also need to move - the values with the keys. */ + schema equality independent of the ordering. */ schemaid = repodata_schema2id(data, schema, 1); else - schemaid = oldschema; + schemaid = oldschemaid; + + if (entry < 0) + { + data->mainschemaoffsets = solv_calloc(sp - schema, sizeof(Id)); + data->mainschema = schemaid; + } + + /* find offsets in old incore data */ + if (oldschemaid) + { + Id *lastneeded = 0; + for (sp = data->schemadata + data->schemata[oldschemaid]; *sp; sp++) + if (seen[*sp] == -1) + lastneeded = sp + 1; + if (lastneeded) + { + if (!oldincoreoffs) + oldincoreoffs = solv_malloc2(data->nkeys, 2 * sizeof(Offset)); + for (sp = data->schemadata + data->schemata[oldschemaid]; sp != lastneeded; sp++) + { + /* Skip the data associated with this old key. 
*/ + key = data->keys + *sp; + ndp = dp; + if (key->storage == KEY_STORAGE_VERTICAL_OFFSET) + { + ndp = data_skip(ndp, REPOKEY_TYPE_ID); + ndp = data_skip(ndp, REPOKEY_TYPE_ID); + } + else if (key->storage == KEY_STORAGE_INCORE) + ndp = data_skip_key(data, ndp, key); + oldincoreoffs[*sp * 2] = dp - data->incoredata; + oldincoreoffs[*sp * 2 + 1] = ndp - dp; + dp = ndp; + } + } + } + /* just copy over the complete old entry (including the schemaid) if there was no new data */ + if (entry >= 0 && !neednewschema && oldschemaid && (!data->attrs || !data->attrs[entry]) && dp) + { + ndp = data->incoredata + data->incoreoffset[entry]; + data->incoreoffset[entry] = newincore.len; + data_addblob(&newincore, ndp, dp - ndp); + goto entrydone; + } /* Now create data blob. We walk through the (possibly new) schema and either copy over old data, or insert the new. */ - /* XXX Here we rely on the fact that the (new) schema has the form - o1 o2 o3 o4 ... | n1 n2 n3 ... - (oX being the old keyids (possibly overwritten), and nX being - the new keyids). This rules out sorting the keyids in order - to ensure a small schema count. 
*/ if (entry >= 0) data->incoreoffset[entry] = newincore.len; data_addid(&newincore, schemaid); - if (entry == -1) - { - data->mainschema = schemaid; - data->mainschemaoffsets = solv_calloc(sp - schema, sizeof(Id)); - } + /* we don't use a pointer to the schemadata here as repodata_serialize_key * may call repodata_schema2id() which might realloc our schemadata */ for (schemaidx = data->schemata[schemaid]; (keyid = data->schemadata[schemaidx]) != 0; schemaidx++) { - if (entry == -1) - data->mainschemaoffsets[schemaidx - data->schemata[schemaid]] = newincore.len; - if (keyid == solvkeyid) + if (entry < 0) { - /* add flexarray entry count */ - data_addid(&newincore, data->end - data->start); - break; - } - key = data->keys + keyid; -#if 0 - fprintf(stderr, "internalize %d(%d):%s:%s\n", entry, entry + data->start, pool_id2str(data->repo->pool, key->name), pool_id2str(data->repo->pool, key->type)); -#endif - ndp = dp; - if (oldcount) - { - /* Skip the data associated with this old key. */ - if (key->storage == KEY_STORAGE_VERTICAL_OFFSET) + data->mainschemaoffsets[schemaidx - data->schemata[schemaid]] = newincore.len; + if (keyid == solvkeyid) { - ndp = data_skip(dp, REPOKEY_TYPE_ID); - ndp = data_skip(ndp, REPOKEY_TYPE_ID); + /* add flexarray entry count */ + data_addid(&newincore, data->end - data->start); + break; /* always the last entry */ } - else if (key->storage == KEY_STORAGE_INCORE) - ndp = data_skip_key(data, dp, key); - oldcount--; } if (seen[keyid] == -1) { - /* If this key was an old one _and_ was not overwritten with - a different value copy over the old value (we skipped it - above). */ - if (dp != ndp) - data_addblob(&newincore, dp, ndp - dp); - seen[keyid] = 0; + if (oldincoreoffs[keyid * 2 + 1]) + data_addblob(&newincore, data->incoredata + oldincoreoffs[keyid * 2], oldincoreoffs[keyid * 2 + 1]); } else if (seen[keyid]) - { - /* Otherwise we have a new value. Parse it into the internal form. 
*/ - repodata_serialize_key(data, &newincore, &newvincore, schema, key, seen[keyid] - 1); - } - dp = ndp; + repodata_serialize_key(data, &newincore, &newvincore, schema, data->keys + keyid, seen[keyid] - 1); } + +entrydone: + /* free memory */ if (entry >= 0 && data->attrs) { if (data->attrs[entry]) @@ -3367,6 +3480,8 @@ fprintf(stderr, "schemadata %p\n", data->schemadata); data->lastdatalen = 0; solv_free(schema); solv_free(seen); + solv_free(keylink); + solv_free(oldincoreoffs); repodata_free_schemahash(data); solv_free(data->incoredata); diff --git a/src/repodata.h b/src/repodata.h index ad05525..c18c688 100644 --- a/src/repodata.h +++ b/src/repodata.h @@ -301,6 +301,7 @@ void repodata_set_location(Repodata *data, Id solvid, int medianr, const char *d void repodata_set_deltalocation(Repodata *data, Id handle, int medianr, const char *dir, const char *file); void repodata_set_sourcepkg(Repodata *data, Id solvid, const char *sourcepkg); Id repodata_lookup_id_uninternalized(Repodata *data, Id solvid, Id keyname, Id voidid); +const char *repodata_lookup_dirstrarray_uninternalized(Repodata *data, Id solvid, Id keyname, Id *didp, Id *iterp); /* stats */ unsigned int repodata_memused(Repodata *data); diff --git a/src/rules.c b/src/rules.c index b941986..248b1cd 100644 --- a/src/rules.c +++ b/src/rules.c @@ -1157,17 +1157,19 @@ finddistupgradepackages(Solver *solv, Solvable *s, Queue *qs, int allow_all) if (!qs->count) { if (allow_all) - return 0; /* orphaned, don't create feature rule */ + return 0; /* orphaned, don't create feature rule */ /* check if this is an orphaned package */ policy_findupdatepackages(solv, s, qs, 1); if (!qs->count) - return 0; /* orphaned, don't create update rule */ + return 0; /* orphaned, don't create update rule */ qs->count = 0; return -SYSTEMSOLVABLE; /* supported but not installable */ } if (allow_all) return s - pool->solvables; /* check if it is ok to keep the installed package */ + if (solv->dupmap.size && MAPTST(&solv->dupmap, s - 
pool->solvables)) + return s - pool->solvables; for (i = 0; i < qs->count; i++) { Solvable *ns = pool->solvables + qs->elements[i]; @@ -1178,6 +1180,7 @@ finddistupgradepackages(Solver *solv, Solvable *s, Queue *qs, int allow_all) return -SYSTEMSOLVABLE; } +#if 0 /* add packages from the dup repositories to the update candidates * this isn't needed for the global dup mode as all packages are * from dup repos in that case */ @@ -1201,6 +1204,7 @@ addduppackages(Solver *solv, Solvable *s, Queue *qs) } queue_free(&dupqs); } +#endif /*------------------------------------------------------------------- * @@ -1218,18 +1222,15 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all) Id p, d; Queue qs; Id qsbuf[64]; + int isorphaned = 0; queue_init_buffer(&qs, qsbuf, sizeof(qsbuf)/sizeof(*qsbuf)); p = s - pool->solvables; /* find update candidates for 's' */ - if (solv->dupmap_all) + if (solv->dupmap_all || (solv->dupinvolvedmap.size && MAPTST(&solv->dupinvolvedmap, p))) p = finddistupgradepackages(solv, s, &qs, allow_all); else - { - policy_findupdatepackages(solv, s, &qs, allow_all); - if (!allow_all && solv->dupinvolvedmap.size && MAPTST(&solv->dupinvolvedmap, p)) - addduppackages(solv, s, &qs); - } + policy_findupdatepackages(solv, s, &qs, allow_all); #ifdef ENABLE_LINKED_PKGS if (solv->instbuddy && solv->instbuddy[s - pool->solvables - solv->installed->start]) @@ -1237,7 +1238,7 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all) const char *name = pool_id2str(pool, s->name); if (strncmp(name, "pattern:", 8) == 0 || strncmp(name, "application:", 12) == 0) { - /* a linked pseudo package. As it is linked, we do not need an update rule */ + /* a linked pseudo package. 
As it is linked, we do not need an update/feature rule */ /* nevertheless we set specialupdaters so we can update */ solver_addrule(solv, 0, 0, 0); if (!allow_all && qs.count) @@ -1254,11 +1255,14 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all) } #endif - if (!allow_all && !p && solv->dupmap_all) + if (!allow_all && !p) /* !p implies qs.count == 0 */ { queue_push(&solv->orphaned, s - pool->solvables); /* an orphaned package */ if (solv->keep_orphans && !(solv->droporphanedmap_all || (solv->droporphanedmap.size && MAPTST(&solv->droporphanedmap, s - pool->solvables - solv->installed->start)))) p = s - pool->solvables; /* keep this orphaned package installed */ + queue_free(&qs); + solver_addrule(solv, p, 0, 0); + return; } if (!allow_all && qs.count && solv->multiversion.size) @@ -1271,7 +1275,7 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all) if (i < qs.count) { /* filter out all multiversion packages as they don't update */ - d = pool_queuetowhatprovides(pool, &qs); + d = pool_queuetowhatprovides(pool, &qs); /* save qs away */ for (j = i; i < qs.count; i++) { if (MAPTST(&solv->multiversion, qs.elements[i])) @@ -1290,19 +1294,25 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all) } qs.elements[j++] = qs.elements[i]; } - if (j < qs.count) + if (j < qs.count) /* filtered at least one package? 
*/ { - if (d && solv->installed && s->repo == solv->installed && - (solv->updatemap_all || (solv->updatemap.size && MAPTST(&solv->updatemap, s - pool->solvables - solv->installed->start)))) + if (j == 0 && p == -SYSTEMSOLVABLE) { + /* this is a multiversion orphan */ + queue_push(&solv->orphaned, s - pool->solvables); if (!solv->specialupdaters) solv->specialupdaters = solv_calloc(solv->installed->end - solv->installed->start, sizeof(Id)); solv->specialupdaters[s - pool->solvables - solv->installed->start] = d; - } - if (j == 0 && p == -SYSTEMSOLVABLE && solv->dupmap_all) - { - queue_push(&solv->orphaned, s - pool->solvables); /* also treat as orphaned */ - j = qs.count; + if (solv->keep_orphans && !(solv->droporphanedmap_all || (solv->droporphanedmap.size && MAPTST(&solv->droporphanedmap, s - pool->solvables - solv->installed->start)))) + { + /* we need to keep the orphan */ + queue_free(&qs); + solver_addrule(solv, s - pool->solvables, 0, 0); + return; + } + /* we can drop it as long as we update */ + isorphaned = 1; + j = qs.count; /* force the update */ } qs.count = j; } @@ -1310,11 +1320,13 @@ solver_addupdaterule(Solver *solv, Solvable *s, int allow_all) { /* could fallthrough, but then we would do pool_queuetowhatprovides twice */ queue_free(&qs); - solver_addrule(solv, p, 0, d); /* allow update of s */ + solver_addrule(solv, s - pool->solvables, 0, d); /* allow update of s */ return; } } } + if (!isorphaned && p == -SYSTEMSOLVABLE && solv->dupmap.size) + p = s - pool->solvables; /* let the dup rules sort it out */ if (qs.count && p == -SYSTEMSOLVABLE) p = queue_shift(&qs); if (qs.count > 1) @@ -1623,7 +1635,7 @@ add_cleandeps_package(Solver *solv, Id p) queue_pushunique(solv->cleandeps_updatepkgs, p); } -static inline void +static void solver_addtodupmaps(Solver *solv, Id p, Id how, int targeted) { Pool *pool = solv->pool; @@ -1796,9 +1808,11 @@ void solver_addduprules(Solver *solv, Map *addedmap) { Pool *pool = solv->pool; + Repo *installed = 
solv->installed; Id p, pp; Solvable *s, *ps; int first, i; + Rule *r; solv->duprules = solv->nrules; for (i = 1; i < pool->nsolvables; i++) @@ -1818,11 +1832,11 @@ solver_addduprules(Solver *solv, Map *addedmap) break; if (!MAPTST(&solv->dupinvolvedmap, p)) continue; - if (solv->installed && ps->repo == solv->installed) + if (installed && ps->repo == installed) { if (!solv->updatemap.size) - map_grow(&solv->updatemap, solv->installed->end - solv->installed->start); - MAPSET(&solv->updatemap, p - solv->installed->start); + map_grow(&solv->updatemap, installed->end - installed->start); + MAPSET(&solv->updatemap, p - installed->start); if (!MAPTST(&solv->dupmap, p)) { Id ip, ipp; @@ -1835,10 +1849,22 @@ solver_addduprules(Solver *solv, Map *addedmap) if (is->evr == ps->evr && solvable_identical(ps, is)) break; } - if (!ip) - solver_addrule(solv, -p, 0, 0); /* no match, sorry */ - else - MAPSET(&solv->dupmap, p); /* for best rules processing */ + if (ip) + { + /* ok, found a good one. we may keep this package. 
*/ + MAPSET(&solv->dupmap, p); /* for best rules processing */ + continue; + } + r = solv->rules + solv->updaterules + (p - installed->start); + if (!r->p) + r = solv->rules + solv->featurerules + (p - installed->start); + if (r->p && solv->specialupdaters && solv->specialupdaters[p - installed->start]) + { + /* this is a multiversion orphan, we're good if an update is installed */ + solver_addrule(solv, -p, 0, solv->specialupdaters[p - installed->start]); + continue; + } + solver_addrule(solv, -p, 0, 0); /* no match, sorry */ } } else if (!MAPTST(&solv->dupmap, p)) @@ -2823,32 +2849,51 @@ solver_rule2rules(Solver *solv, Id rid, Queue *q, int recursive) /* check if the newest versions of pi still provides the dependency we're looking for */ static int -solver_choicerulecheck(Solver *solv, Id pi, Rule *r, Map *m) +solver_choicerulecheck(Solver *solv, Id pi, Rule *r, Map *m, Queue *q) { Pool *pool = solv->pool; Rule *ur; - Queue q; - Id p, pp, qbuf[32]; + Id p, pp; int i; - ur = solv->rules + solv->updaterules + (pi - pool->installed->start); - if (!ur->p) - ur = solv->rules + solv->featurerules + (pi - pool->installed->start); - if (!ur->p) - return 0; - queue_init_buffer(&q, qbuf, sizeof(qbuf)/sizeof(*qbuf)); - FOR_RULELITERALS(p, pp, ur) - if (p > 0) - queue_push(&q, p); - if (q.count > 1) - policy_filter_unwanted(solv, &q, POLICY_MODE_CHOOSE); - for (i = 0; i < q.count; i++) - if (MAPTST(m, q.elements[i])) - break; - /* 1: none of the newest versions provide it */ - i = i == q.count ? 
1 : 0; - queue_free(&q); - return i; + if (!q->count || q->elements[0] != pi) + { + if (q->count) + queue_empty(q); + ur = solv->rules + solv->updaterules + (pi - pool->installed->start); + if (!ur->p) + ur = solv->rules + solv->featurerules + (pi - pool->installed->start); + if (!ur->p) + return 0; + queue_push2(q, pi, 0); + FOR_RULELITERALS(p, pp, ur) + if (p > 0) + queue_push(q, p); + } + if (q->count == 2) + return 1; + if (q->count == 3) + { + p = q->elements[2]; + return MAPTST(m, p) ? 0 : 1; + } + if (!q->elements[1]) + { + for (i = 2; i < q->count; i++) + if (!MAPTST(m, q->elements[i])) + break; + if (i == q->count) + return 0; /* all provide it, no need to filter */ + /* some don't provide it, have to filter */ + queue_deleten(q, 0, 2); + policy_filter_unwanted(solv, q, POLICY_MODE_CHOOSE); + queue_unshift(q, 1); /* filter mark */ + queue_unshift(q, pi); + } + for (i = 2; i < q->count; i++) + if (MAPTST(m, q->elements[i])) + return 0; /* at least one provides it */ + return 1; /* none of the new packages provided it */ } static inline void @@ -2873,7 +2918,7 @@ solver_addchoicerules(Solver *solv) Pool *pool = solv->pool; Map m, mneg; Rule *r; - Queue q, qi; + Queue q, qi, qcheck; int i, j, rid, havechoice; Id p, d, pp; Id p2, pp2; @@ -2892,6 +2937,7 @@ solver_addchoicerules(Solver *solv) solv->choicerules_ref = solv_calloc(solv->pkgrules_end, sizeof(Id)); queue_init(&q); queue_init(&qi); + queue_init(&qcheck); map_init(&m, pool->nsolvables); map_init(&mneg, pool->nsolvables); /* set up negative assertion map from infarch and dup rules */ @@ -3009,7 +3055,7 @@ solver_addchoicerules(Solver *solv) p2 = qi.elements[i]; if (!p2) continue; - if (solver_choicerulecheck(solv, p2, r, &m)) + if (solver_choicerulecheck(solv, p2, r, &m, &qcheck)) { /* oops, remove element p from q */ queue_removeelement(&q, qi.elements[i + 1]); @@ -3018,6 +3064,7 @@ solver_addchoicerules(Solver *solv) qi.elements[j++] = p2; } queue_truncate(&qi, j); + if (!q.count || !qi.count) { 
FOR_RULELITERALS(p, pp, r) @@ -3089,6 +3136,7 @@ solver_addchoicerules(Solver *solv) } queue_free(&q); queue_free(&qi); + queue_free(&qcheck); map_free(&m); map_free(&mneg); solv->choicerules_end = solv->nrules; diff --git a/src/solver.c b/src/solver.c index c6cad6b..2e28b7d 100644 --- a/src/solver.c +++ b/src/solver.c @@ -217,13 +217,24 @@ autouninstall(Solver *solv, Id *problem) Rule *r; if (m && !MAPTST(m, v - solv->updaterules)) continue; - /* check if identical to feature rule, we don't like that */ + /* check if identical to feature rule, we don't like that (except for orphans) */ r = solv->rules + solv->featurerules + (v - solv->updaterules); if (!r->p) { /* update rule == feature rule */ if (v > lastfeature) lastfeature = v; + /* prefer orphaned packages in dup mode */ + if (solv->keep_orphans) + { + r = solv->rules + v; + if (!r->d && r->p == (solv->installed->start + (v - solv->updaterules))) + { + lastfeature = v; + lastupdate = 0; + break; + } + } continue; } if (v > lastupdate) @@ -2714,7 +2725,7 @@ solver_run_sat(Solver *solv, int disablerules, int doweak) if (!solv->decisioncnt_orphan) solv->decisioncnt_orphan = solv->decisionq.count; - if (solv->dupmap_all && solv->installed) + if (solv->installed && (solv->orphaned.count || solv->brokenorphanrules)) { int installedone = 0; @@ -3350,7 +3361,7 @@ solver_solve(Solver *solv, Queue *job) Solvable *s; Rule *r; int now, solve_start; - int hasdupjob = 0; + int needduprules = 0; int hasbestinstalljob = 0; solve_start = solv_timems(0); @@ -3561,6 +3572,19 @@ solver_solve(Solver *solv, Queue *job) MAPSET(&solv->droporphanedmap, p - installed->start); } break; + case SOLVER_ALLOWUNINSTALL: + if (select == SOLVER_SOLVABLE_ALL || (select == SOLVER_SOLVABLE_REPO && installed && what == installed->repoid)) + solv->allowuninstall_all = 1; + FOR_JOB_SELECT(p, pp, select, what) + { + s = pool->solvables + p; + if (s->repo != installed) + continue; + if (!solv->allowuninstallmap.size) + 
map_grow(&solv->allowuninstallmap, installed->end - installed->start); + MAPSET(&solv->allowuninstallmap, p - installed->start); + } + break; default: break; } @@ -3608,8 +3632,10 @@ solver_solve(Solver *solv, Queue *job) if (how & SOLVER_FORCEBEST) solv->bestupdatemap_all = 1; } - if (!solv->dupmap_all || solv->allowuninstall) - hasdupjob = 1; + if ((how & SOLVER_TARGETED) != 0) + needduprules = 1; + if (!solv->dupmap_all || solv->allowuninstall || solv->allowuninstall_all || solv->allowuninstallmap.size || solv->keep_orphans) + needduprules = 1; break; default: break; @@ -3664,7 +3690,7 @@ solver_solve(Solver *solv, Queue *job) /* create dup maps if needed. We need the maps early to create our * update rules */ - if (hasdupjob) + if (needduprules) solver_createdupmaps(solv); /* @@ -3723,9 +3749,13 @@ solver_solve(Solver *solv, Queue *job) * check for and remove duplicate */ r = solv->rules + solv->nrules - 1; /* r: update rule */ - if (!r->p) - continue; sr = r - (installed->end - installed->start); /* sr: feature rule */ + if (!r->p) + { + if (sr->p) + memset(sr, 0, sizeof(*sr)); /* no feature rules without update rules */ + continue; + } /* it's also orphaned if the feature rule consists just of the installed package */ if (!solv->dupmap_all && sr->p == i && !sr->d && !sr->w2) queue_push(&solv->orphaned, i); @@ -3917,17 +3947,6 @@ solver_solve(Solver *solv, Queue *job) break; case SOLVER_ALLOWUNINSTALL: POOL_DEBUG(SOLV_DEBUG_JOB, "job: allowuninstall %s\n", solver_select2str(pool, select, what)); - if (select == SOLVER_SOLVABLE_ALL || (select == SOLVER_SOLVABLE_REPO && installed && what == installed->repoid)) - solv->allowuninstall_all = 1; - FOR_JOB_SELECT(p, pp, select, what) - { - s = pool->solvables + p; - if (s->repo != installed) - continue; - if (!solv->allowuninstallmap.size) - map_grow(&solv->allowuninstallmap, installed->end - installed->start); - MAPSET(&solv->allowuninstallmap, p - installed->start); - } break; default: POOL_DEBUG(SOLV_DEBUG_JOB, 
"job: unknown job\n"); @@ -3966,7 +3985,7 @@ solver_solve(Solver *solv, Queue *job) else solv->infarchrules = solv->infarchrules_end = solv->nrules; - if (hasdupjob) + if (needduprules) solver_addduprules(solv, &addedmap); else solv->duprules = solv->duprules_end = solv->nrules; @@ -3976,7 +3995,7 @@ solver_solve(Solver *solv, Queue *job) else solv->bestrules = solv->bestrules_end = solv->nrules; - if (hasdupjob) + if (needduprules) solver_freedupmaps(solv); /* no longer needed */ if (solv->do_yum_obsoletes) @@ -76,6 +76,30 @@ solv_calloc(size_t num, size_t len) return r; } +/* this was solv_realloc2(old, len, size), but we now overshoot + * for huge len sizes */ +void * +solv_extend_realloc(void *old, size_t len, size_t size, size_t block) +{ + size_t xblock = (block + 1) << 5; + len = (len + block) & ~block; + if (len >= xblock && xblock) + { + xblock <<= 1; + while (len >= xblock && xblock) + xblock <<= 1; + if (xblock) + { + size_t nlen; + xblock = (xblock >> 5) - 1; + nlen = (len + xblock) & ~xblock; + if (nlen > len) + len = nlen; + } + } + return solv_realloc2(old, len, size); +} + void * solv_free(void *mem) { @@ -29,6 +29,7 @@ extern void *solv_malloc2(size_t, size_t); extern void *solv_calloc(size_t, size_t); extern void *solv_realloc(void *, size_t); extern void *solv_realloc2(void *, size_t, size_t); +extern void *solv_extend_realloc(void *, size_t, size_t, size_t); extern void *solv_free(void *); extern char *solv_strdup(const char *); extern void solv_oom(size_t, size_t); @@ -48,12 +49,12 @@ static inline void *solv_extend(void *buf, size_t len, size_t nmemb, size_t size if (nmemb == 1) { if ((len & block) == 0) - buf = solv_realloc2(buf, len + (1 + block), size); + buf = solv_extend_realloc(buf, len + 1, size, block); } else { if (((len - 1) | block) != ((len + nmemb - 1) | block)) - buf = solv_realloc2(buf, (len + (nmemb + block)) & ~block, size); + buf = solv_extend_realloc(buf, len + nmemb, size, block); } return buf; } @@ -76,7 +77,7 @@ static 
inline void *solv_zextend(void *buf, size_t len, size_t nmemb, size_t siz static inline void *solv_extend_resize(void *buf, size_t len, size_t size, size_t block) { if (len) - buf = solv_realloc2(buf, (len + block) & ~block, size); + buf = solv_extend_realloc(buf, len, size, block); return buf; } @@ -85,7 +86,7 @@ static inline void *solv_calloc_block(size_t len, size_t size, size_t block) void *buf; if (!len) return 0; - buf = solv_malloc2((len + block) & ~block, size); + buf = solv_extend_realloc((void *)0, len, size, block); memset(buf, 0, ((len + block) & ~block) * size); return buf; } diff --git a/test/testcases/distupgrade/dup_multiversion1 b/test/testcases/distupgrade/dup_multiversion1 new file mode 100644 index 0000000..326de7a --- /dev/null +++ b/test/testcases/distupgrade/dup_multiversion1 @@ -0,0 +1,91 @@ +# test dup with multiversion packages +# +# part 1: simple update +repo system 0 testtags <inline> +#>=Pkg: a 1 1 i686 +repo available 0 testtags <inline> +#>=Pkg: a 2 1 i686 +system i686 * system + +job multiversion name a +job distupgrade all packages +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + +nextjob + +job multiversion name a +job distupgrade repo available +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + + +### same with keeporphans + +nextjob + +solverflags keeporphans +job multiversion name a +job distupgrade all packages +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + + +nextjob + +solverflags keeporphans +job multiversion name a +job distupgrade repo available +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + + +### same with allowuninstall + +nextjob + +solverflags allowuninstall +job multiversion name a +job distupgrade all packages +# a-1-1 is treated 
as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + + +nextjob + +solverflags allowuninstall +job multiversion name a +job distupgrade repo available +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + + +### same with allowuninstall and keeporphans + +nextjob + +solverflags allowuninstall keeporphans +job multiversion name a +job distupgrade all packages +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + + +nextjob + +solverflags allowuninstall keeporphans +job multiversion name a +job distupgrade repo available +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>install a-2-1.i686@available + + + diff --git a/test/testcases/distupgrade/dup_multiversion2 b/test/testcases/distupgrade/dup_multiversion2 new file mode 100644 index 0000000..18909eb --- /dev/null +++ b/test/testcases/distupgrade/dup_multiversion2 @@ -0,0 +1,106 @@ +# test dup with multiversion packages +# same as with dup_multiversion1, but we can't keep the orphan + +# +# part 1: simple update +repo system 0 testtags <inline> +#>=Pkg: a 1 1 i686 +#>=Pkg: b 1 1 i686 +repo available 0 testtags <inline> +#>=Pkg: a 2 1 i686 +#>=Pkg: b 2 1 i686 +#>=Con: a = 1-1 +system i686 * system + +job multiversion name a +job distupgrade all packages +result transaction,problems <inline> +#>erase a-1-1.i686@system +#>install a-2-1.i686@available +#>upgrade b-1-1.i686@system b-2-1.i686@available + +nextjob + +job multiversion name a +job distupgrade repo available +result transaction,problems <inline> +#>erase a-1-1.i686@system +#>install a-2-1.i686@available +#>upgrade b-1-1.i686@system b-2-1.i686@available + + +### same with keeporphans, this will result in problems as we cannot keep the orphan + +nextjob + +solverflags keeporphans +job multiversion name a +job distupgrade all packages +result 
transaction,problems <inline> +#>install a-2-1.i686@available +#>problem 4d4de423 info package b-2-1.i686 conflicts with a = 1-1 provided by a-1-1.i686 +#>problem 4d4de423 solution 2cf4745c erase a-1-1.i686@system +#>problem 4d4de423 solution 2cf4745c replace a-1-1.i686@system a-2-1.i686@available +#>problem 4d4de423 solution 5a433aff allow b-1-1.i686@system +#>problem 4d4de423 solution ce4305f2 erase b-1-1.i686@system + +nextjob + +solverflags keeporphans +job multiversion name a +job distupgrade repo available +result transaction,problems <inline> +#>install a-2-1.i686@available +#>problem 4d4de423 info package b-2-1.i686 conflicts with a = 1-1 provided by a-1-1.i686 +#>problem 4d4de423 solution 2cf4745c erase a-1-1.i686@system +#>problem 4d4de423 solution 2cf4745c replace a-1-1.i686@system a-2-1.i686@available +#>problem 4d4de423 solution 5a433aff allow b-1-1.i686@system +#>problem 4d4de423 solution ce4305f2 erase b-1-1.i686@system + +### same with allowuninstall + +nextjob + +solverflags allowuninstall +job multiversion name a +job distupgrade all packages +result transaction,problems <inline> +#>erase a-1-1.i686@system +#>install a-2-1.i686@available +#>upgrade b-1-1.i686@system b-2-1.i686@available + +nextjob + +solverflags allowuninstall +job multiversion name a +job distupgrade repo available +result transaction,problems <inline> +#>erase a-1-1.i686@system +#>install a-2-1.i686@available +#>upgrade b-1-1.i686@system b-2-1.i686@available + + +### same with allowuninstall and keeporphans + +nextjob + +solverflags allowuninstall keeporphans +job multiversion name a +job distupgrade all packages +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>erase b-1-1.i686@system +#>install a-2-1.i686@available + + +nextjob + +solverflags allowuninstall keeporphans +job multiversion name a +job distupgrade repo available +# a-1-1 is treated as orphaned and stays behind +result transaction,problems <inline> +#>erase b-1-1.i686@system 
+#>install a-2-1.i686@available + + diff --git a/test/testcases/distupgrade/dup_multiversion3 b/test/testcases/distupgrade/dup_multiversion3 new file mode 100644 index 0000000..8be3190 --- /dev/null +++ b/test/testcases/distupgrade/dup_multiversion3 @@ -0,0 +1,88 @@ +# test dup with multiversion packages where we cannot install the +# target. Should give problems except for allowuninstall. +# +# part 1: simple update +repo system 0 testtags <inline> +#>=Pkg: a 1 1 i686 +repo available 0 testtags <inline> +#>=Pkg: a 2 1 i686 +#>=Req: c +system i686 * system + +job multiversion name a +job distupgrade all packages +result transaction,problems <inline> +#>problem 251f1f35 info nothing provides c needed by a-2-1.i686 +#>problem 251f1f35 solution 2f2d254c allow a-1-1.i686@system + +nextjob + +job multiversion name a +job distupgrade repo available +result transaction,problems <inline> +#>erase a-1-1.i686@system +#>problem 251f1f35 info nothing provides c needed by a-2-1.i686 +#>problem 251f1f35 solution 2f2d254c allow a-1-1.i686@system + +### same with keeporphans + +nextjob + +solverflags keeporphans +job multiversion name a +job distupgrade all packages +result transaction,problems <inline> +#>problem 771581fd info nothing provides c needed by a-2-1.i686 +#>problem 771581fd solution 179b72ed allow a-1-1.i686@system +#>problem 771581fd solution 2cf4745c erase a-1-1.i686@system + +nextjob + +solverflags keeporphans +job multiversion name a +job distupgrade repo available +result transaction,problems <inline> +#>problem 771581fd info nothing provides c needed by a-2-1.i686 +#>problem 771581fd solution 179b72ed allow a-1-1.i686@system +#>problem 771581fd solution 2cf4745c erase a-1-1.i686@system + +### same with allowuninstall + +nextjob + +solverflags allowuninstall +job multiversion name a +job distupgrade all packages +result transaction,problems <inline> +#>erase a-1-1.i686@system + + +nextjob + +solverflags allowuninstall +job multiversion name a +job distupgrade 
repo available +result transaction,problems <inline> +#>erase a-1-1.i686@system + + +### same with allowuninstall and keeporphans + +nextjob + +solverflags allowuninstall keeporphans +job multiversion name a +job distupgrade all packages +result transaction,problems <inline> +#>erase a-1-1.i686@system + + +nextjob + +solverflags allowuninstall keeporphans +job multiversion name a +job distupgrade repo available +result transaction,problems <inline> +#>erase a-1-1.i686@system + + diff --git a/tools/rpmdb2solv.c b/tools/rpmdb2solv.c index 2fbf558..3b1d41b 100644 --- a/tools/rpmdb2solv.c +++ b/tools/rpmdb2solv.c @@ -207,7 +207,7 @@ main(int argc, char **argv) #ifdef ENABLE_APPDATA if (add_appdata) - repo_add_appdata_dir(repo, "/usr/share/appdata", REPO_USE_ROOTDIR | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE); + repo_add_appdata_dir(repo, "/usr/share/appdata", REPO_USE_ROOTDIR | REPO_REUSE_REPODATA | REPO_NO_INTERNALIZE | APPDATA_SEARCH_UNINTERNALIZED_FILELIST); #endif repodata_internalize(data); |