Package _emerge :: Module depgraph
[hide private]

Source Code for Module _emerge.depgraph

   1  # Copyright 1999-2016 Gentoo Foundation 
   2  # Distributed under the terms of the GNU General Public License v2 
   3   
   4  from __future__ import division, print_function, unicode_literals 
   5   
   6  import collections 
   7  import errno 
   8  import io 
   9  import logging 
  10  import stat 
  11  import sys 
  12  import textwrap 
  13  import warnings 
  14  from collections import deque 
  15  from itertools import chain 
  16   
  17  import portage 
  18  from portage import os, OrderedDict 
  19  from portage import _unicode_decode, _unicode_encode, _encodings 
  20  from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS 
  21  from portage.dbapi import dbapi 
  22  from portage.dbapi.dep_expand import dep_expand 
  23  from portage.dbapi.DummyTree import DummyTree 
  24  from portage.dbapi.IndexedPortdb import IndexedPortdb 
  25  from portage.dbapi._similar_name_search import similar_name_search 
  26  from portage.dep import Atom, best_match_to_list, extract_affecting_use, \ 
  27          check_required_use, human_readable_required_use, match_from_list, \ 
  28          _repo_separator 
  29  from portage.dep._slot_operator import (ignore_built_slot_operator_deps, 
  30          strip_slots) 
  31  from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \ 
  32          _get_eapi_attrs 
  33  from portage.exception import (InvalidAtom, InvalidData, InvalidDependString, 
  34          PackageNotFound, PortageException) 
  35  from portage.localization import _ 
  36  from portage.output import colorize, create_color_func, \ 
  37          darkgreen, green 
  38  bad = create_color_func("BAD") 
  39  from portage.package.ebuild.config import _get_feature_flags 
  40  from portage.package.ebuild.getmaskingstatus import \ 
  41          _getmaskingstatus, _MaskReason 
  42  from portage._sets import SETPREFIX 
  43  from portage._sets.base import InternalPackageSet 
  44  from portage.util import ConfigProtect, shlex_split, new_protect_filename 
  45  from portage.util import cmp_sort_key, writemsg, writemsg_stdout 
  46  from portage.util import ensure_dirs 
  47  from portage.util import writemsg_level, write_atomic 
  48  from portage.util.digraph import digraph 
  49  from portage.util._async.TaskScheduler import TaskScheduler 
  50  from portage.util._eventloop.EventLoop import EventLoop 
  51  from portage.util._eventloop.global_event_loop import global_event_loop 
  52  from portage.versions import catpkgsplit 
  53   
  54  from _emerge.AtomArg import AtomArg 
  55  from _emerge.Blocker import Blocker 
  56  from _emerge.BlockerCache import BlockerCache 
  57  from _emerge.BlockerDepPriority import BlockerDepPriority 
  58  from .chk_updated_cfg_files import chk_updated_cfg_files 
  59  from _emerge.countdown import countdown 
  60  from _emerge.create_world_atom import create_world_atom 
  61  from _emerge.Dependency import Dependency 
  62  from _emerge.DependencyArg import DependencyArg 
  63  from _emerge.DepPriority import DepPriority 
  64  from _emerge.DepPriorityNormalRange import DepPriorityNormalRange 
  65  from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange 
  66  from _emerge.EbuildMetadataPhase import EbuildMetadataPhase 
  67  from _emerge.FakeVartree import FakeVartree 
  68  from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps 
  69  from _emerge.is_valid_package_atom import insert_category_into_atom, \ 
  70          is_valid_package_atom 
  71  from _emerge.Package import Package 
  72  from _emerge.PackageArg import PackageArg 
  73  from _emerge.PackageVirtualDbapi import PackageVirtualDbapi 
  74  from _emerge.RootConfig import RootConfig 
  75  from _emerge.search import search 
  76  from _emerge.SetArg import SetArg 
  77  from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice 
  78  from _emerge.UnmergeDepPriority import UnmergeDepPriority 
  79  from _emerge.UseFlagDisplay import pkg_use_display 
  80  from _emerge.UserQuery import UserQuery 
  81   
  82  from _emerge.resolver.backtracking import Backtracker, BacktrackParameter 
  83  from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex 
  84  from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper 
  85  from _emerge.resolver.slot_collision import slot_conflict_handler 
  86  from _emerge.resolver.circular_dependency import circular_dependency_handler 
  87  from _emerge.resolver.output import Display, format_unmatched_atom 
  88   
# Python 2/3 compatibility aliases. On Python 3 the "basestring" and
# "long" builtins no longer exist (str and int cover both roles), and
# "_unicode" is bound to the native text type of whichever major
# version is running. The rest of this module uses these names so the
# same code path works on both interpreters.
if sys.hexversion >= 0x3000000:
	basestring = str
	long = int
	_unicode = str
else:
	_unicode = unicode
96 -class _scheduler_graph_config(object):
97 - def __init__(self, trees, pkg_cache, graph, mergelist):
98 self.trees = trees 99 self.pkg_cache = pkg_cache 100 self.graph = graph 101 self.mergelist = mergelist
102
def _wildcard_set(atoms):
	"""
	Build an InternalPackageSet (wildcards allowed, repos disallowed)
	from an iterable of atom strings. A token that is not a valid atom
	on its own is retried with a "*/" category wildcard prefix, so bare
	package names like "foo" are accepted as "*/foo".
	"""
	result = InternalPackageSet(allow_wildcard=True)
	for token in atoms:
		try:
			atom = Atom(token, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			# Probably a plain package name; qualify it with a
			# category wildcard and let a second failure propagate.
			atom = Atom("*/" + token, allow_wildcard=True, allow_repo=False)
		result.add(atom)
	return result
class _frozen_depgraph_config(object):
	"""
	Depgraph state that stays constant across backtracking runs: the
	per-root tree/setting structures (with FakeVartree substituted for
	the real vartree), user option sets parsed from myopts, and the
	rebuild-trigger flags. A single instance is shared by every
	depgraph created while backtracking.
	"""

	def __init__(self, settings, trees, myopts, params, spinner):
		self.settings = settings
		self.target_root = settings["EROOT"]
		self.myopts = myopts
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self.requested_depth = params.get("deep", 0)
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# All Package instances
		self._pkg_cache = {}
		self._highest_license_masked = {}
		# We can't know that an soname dep is unsatisfied if there are
		# any unbuilt ebuilds in the graph, since unbuilt ebuilds have
		# no soname data. Therefore, only enable soname dependency
		# resolution if --usepkgonly is enabled, or for removal actions.
		self.soname_deps_enabled = (
			("--usepkgonly" in myopts or "remove" in params) and
			params.get("ignore_soname_deps") != "y")
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			# The FakeVartree models the vdb state as it will be after
			# planned merges, without touching the real vdb.
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot],
					dynamic_deps=dynamic_deps,
					ignore_built_slot_operator_deps=ignore_built_slot_operator_deps,
					soname_deps=self.soname_deps_enabled)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			if self.soname_deps_enabled and "remove" not in params:
				# Wrap the bintree dbapi so soname "provides" data is
				# queryable during dependency resolution.
				self.trees[myroot]["bintree"] = DummyTree(
					DbapiProvidesIndex(trees[myroot]["bintree"].dbapi))

		self._required_set_names = set(["world"])

		# Each of these options may be given multiple times, so join
		# all occurrences and re-split on whitespace before parsing
		# into a wildcard-capable package set.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
183
class _depgraph_sets(object):
	"""
	Per-root bookkeeping of package sets and argument atoms that were
	pulled into the graph.
	"""
	def __init__(self):
		# All atoms from every set added to the graph, including atoms
		# given directly as arguments.
		self.atoms = InternalPackageSet(allow_repo=True)
		# Maps atoms back to the arguments that supplied them.
		self.atom_arg_map = {}
		# Every set added to the graph, keyed by set name; non-set atom
		# arguments are collected under the reserved '__non_set_args__'
		# pseudo-set.
		self.sets = {'__non_set_args__': InternalPackageSet(allow_repo=True)}
194
class _rebuild_config(object):
	"""
	Tracks which packages must be rebuilt or reinstalled to honor the
	--rebuild-if-new-rev / --rebuild-if-new-ver / --rebuild-if-unbuilt
	options. Build-time dependency edges are collected into a private
	digraph via add(), then trigger_rebuilds() walks it bottom-up.
	"""
	def __init__(self, frozen_config, backtrack_parameters):
		self._graph = digraph()
		self._frozen_config = frozen_config
		# Copies, so backtracking restarts don't see our mutations.
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True if any rebuild-triggering option is active at all.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)

	def add(self, dep_pkg, dep):
		"""
		Record a build-time dependency edge (dep_pkg -> parent) for
		later rebuild analysis, unless rebuild options are off or the
		packages are excluded/ignored by user configuration.
		"""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and priority.buildtime and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)

	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
			return False

		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
			return True

		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
				return False

		return True

	def _trigger_rebuild(self, parent, build_deps):
		"""
		Decide whether `parent` must be rebuilt or reinstalled given
		its build-time deps (a {slot_atom: dep_pkg} map). Returns True
		when a rebuild was scheduled (restart needed), or the reinstall
		flag otherwise.
		"""
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
			return False
		trees = self._frozen_config.trees
		reinstall = False
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
				return True
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):

				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				#    consistent.
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Another binary with the same version (ignoring
					# revision) on the parent's binhost also validates it.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
							if uri == dep_uri:
								break
				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					#    built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
					return True
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					try:
						bin_build_time, = bindb.aux_get(parent.cpv,
							["BUILD_TIME"])
					except KeyError:
						continue
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local package
						#    is not up to date. Force reinstall.
						reinstall = True
		if reinstall:
			self.reinstall_list.add(root_slot)
		return reinstall

	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.
		"""
		need_restart = False
		graph = self._graph
		build_deps = {}

		leaf_nodes = deque(graph.leaf_nodes())

		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
		while graph:
			if not leaf_nodes:
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])

			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
				continue
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			graph.remove(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				if parent == node:
					# Ignore a direct cycle.
					continue
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
				need_restart = True

		return need_restart
355 356
class _dynamic_depgraph_config(object):
	"""
	Depgraph state that is rebuilt for every backtracking attempt:
	the dependency digraph, blocker bookkeeping digraphs, per-root
	graph/filtered tree views used by dep_check(), and the various
	caches and backtrack-parameter snapshots.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		# contains the args created by select_files
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		self.sets = {}
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This is used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		self._parent_atoms = {}
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}
		self._highest_pkg_cache_cp_map = {}
		self._flatten_atoms_cache = {}

		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}

		# Snapshots of backtracking parameters for this attempt.
		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
		self._prune_rebuilds = backtrack_parameters.prune_rebuilds
		self._need_restart = False
		self._need_config_reload = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._buildpkgonly_deps_unsatisfied = False
		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._displayed_autounmask = False
		self._success_without_autounmask = False
		self._required_use_unsatisfied = False
		self._traverse_ignored_deps = False
		self._complete_mode = False
		self._slot_operator_deps = {}
		self._installed_sonames = collections.defaultdict(list)
		self._package_tracker = PackageTracker(
			soname_deps=depgraph._frozen_config.soname_deps_enabled)
		# Track missed updates caused by solved conflicts.
		self._conflict_missed_update = collections.defaultdict(dict)

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)

			# A bare callable object that merely carries a .dbapi
			# attribute, mimicking the tree interface dep_check() uses.
			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# that have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph
			self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
			self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
			def filtered_tree():
				pass
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]
			self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
			self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

			dbs = []
			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
			else:
				if "--usepkgonly" not in depgraph._frozen_config.myopts:
					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
					db_keys = list(portdb._aux_cache_keys)
					dbs.append((portdb, "ebuild", False, False, db_keys))

				if "--usepkg" in depgraph._frozen_config.myopts:
					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
					db_keys = list(bindb._aux_cache_keys)
					dbs.append((bindb, "binary", True, False, db_keys))

			# The installed-package db is always consulted last.
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
	"""
	The dependency resolver: builds and validates the merge graph for
	a set of requested atoms/sets. (Methods follow below.)
	"""

	# Represents the depth of a node that is unreachable from explicit
	# user arguments (or their deep dependencies). Such nodes are pulled
	# in by the _complete_graph method. A unique sentinel object, so it
	# never compares equal to a real integer depth.
	_UNREACHABLE_DEPTH = object()

	pkg_tree_map = RootConfig.pkg_tree_map
def __init__(self, settings, trees, myopts, myparams, spinner,
	frozen_config=None, backtrack_parameters=None, allow_backtracking=False):
	"""
	@param settings: portage config for the target root
	@param trees: per-root tree dictionaries
	@param myopts: parsed emerge command-line options
	@param myparams: depgraph parameters
	@param spinner: progress spinner (may be None)
	@param frozen_config: shared _frozen_depgraph_config to reuse when
		backtracking; a fresh one is created when None
	@param backtrack_parameters: BacktrackParameter for this attempt;
		a default-constructed one is used when None
	@param allow_backtracking: whether backtracking is permitted
	"""
	# Fix for a shared default-argument instance: the original default
	# "backtrack_parameters=BacktrackParameter()" was evaluated once at
	# definition time and shared across all calls. Construct a fresh
	# instance per call instead; callers that passed an explicit value
	# are unaffected.
	if backtrack_parameters is None:
		backtrack_parameters = BacktrackParameter()
	if frozen_config is None:
		frozen_config = _frozen_depgraph_config(settings, trees,
			myopts, myparams, spinner)
	self._frozen_config = frozen_config
	self._dynamic_config = _dynamic_depgraph_config(self, myparams,
		allow_backtracking, backtrack_parameters)
	self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)

	self._select_atoms = self._select_atoms_highest_available
	self._select_package = self._select_pkg_highest_available

	# Replaced the fragile "cond and a or b" idiom with a conditional
	# expression; both branches yield an event loop instance, so the
	# behavior is unchanged.
	self._event_loop = (global_event_loop()
		if portage._internal_caller else EventLoop(main=False))

	self._select_atoms_parent = None

	self.query = UserQuery(myopts).query
557
def _index_binpkgs(self):
	"""
	Populate each root's binary-package soname provides index from the
	original (unwrapped) bintree dbapi. Roots whose index is already
	populated are skipped, so backtracking does not repeat the work.
	"""
	frozen = self._frozen_config
	for root in frozen.trees:
		bindb = frozen.trees[root]["bintree"].dbapi
		if bindb._provides_index:
			# Already indexed; don't repeat this when backtracking.
			continue
		root_config = frozen.roots[root]
		orig_bindb = frozen._trees_orig[root]["bintree"].dbapi
		for cpv in orig_bindb.cpv_all():
			bindb._provides_inject(
				self._pkg(cpv, "binary", root_config))
569
def _load_vdb(self):
	"""
	Load installed package metadata if appropriate. This used to be called
	from the constructor, but that wasn't very nice since this procedure
	is slow and it generates spinner output. So, now it's called on-demand
	by various methods when necessary.
	"""

	if self._dynamic_config._vdb_loaded:
		return

	for myroot in self._frozen_config.trees:

		dynamic_deps = self._dynamic_config.myparams.get(
			"dynamic_deps", "y") != "n"
		preload_installed_pkgs = \
			"--nodeps" not in self._frozen_config.myopts

		fake_vartree = self._frozen_config.trees[myroot]["vartree"]
		if not fake_vartree.dbapi:
			# This needs to be called for the first depgraph, but not for
			# backtracking depgraphs that share the same frozen_config.
			fake_vartree.sync()

			# FakeVartree.sync() populates virtuals, and we want
			# self.pkgsettings to have them populated too.
			self._frozen_config.pkgsettings[myroot] = \
				portage.config(clone=fake_vartree.settings)

		if preload_installed_pkgs:
			vardb = fake_vartree.dbapi

			if not dynamic_deps:
				# Static deps: just register every installed package.
				for pkg in vardb:
					self._dynamic_config._package_tracker.add_installed_pkg(pkg)
					self._add_installed_sonames(pkg)
			else:
				# Dynamic deps: schedule metadata regeneration tasks,
				# bounded by the user's --jobs / --load-average.
				max_jobs = self._frozen_config.myopts.get("--jobs")
				max_load = self._frozen_config.myopts.get("--load-average")
				scheduler = TaskScheduler(
					self._dynamic_deps_preload(fake_vartree),
					max_jobs=max_jobs,
					max_load=max_load,
					event_loop=fake_vartree._portdb._event_loop)
				scheduler.start()
				scheduler.wait()

	self._dynamic_config._vdb_loaded = True
618
def _dynamic_deps_preload(self, fake_vartree):
	"""
	Generator consumed by the TaskScheduler in _load_vdb: registers
	each installed package and yields an EbuildMetadataPhase task for
	every package whose metadata cache is stale, so its dynamic deps
	can be regenerated asynchronously.
	"""
	portdb = fake_vartree._portdb
	for pkg in fake_vartree.dbapi:
		self._spinner_update()
		self._dynamic_config._package_tracker.add_installed_pkg(pkg)
		self._add_installed_sonames(pkg)
		ebuild_path, repo_path = \
			portdb.findname2(pkg.cpv, myrepo=pkg.repo)
		if ebuild_path is None:
			# No matching ebuild in the tree; fall back to vdb deps.
			fake_vartree.dynamic_deps_preload(pkg, None)
			continue
		metadata, ebuild_hash = portdb._pull_valid_cache(
			pkg.cpv, ebuild_path, repo_path)
		if metadata is not None:
			# Cache hit: preload immediately without spawning a task.
			fake_vartree.dynamic_deps_preload(pkg, metadata)
		else:
			proc = EbuildMetadataPhase(cpv=pkg.cpv,
				ebuild_hash=ebuild_hash,
				portdb=portdb, repo_path=repo_path,
				settings=portdb.doebuild_settings)
			# The listener feeds the regenerated metadata (or None on
			# failure) back into the FakeVartree when the task exits.
			proc.addExitListener(
				self._dynamic_deps_proc_exit(pkg, fake_vartree))
			yield proc
642
643 - class _dynamic_deps_proc_exit(object):
644 645 __slots__ = ('_pkg', '_fake_vartree') 646
647 - def __init__(self, pkg, fake_vartree):
648 self._pkg = pkg 649 self._fake_vartree = fake_vartree
650
651 - def __call__(self, proc):
652 metadata = None 653 if proc.returncode == os.EX_OK: 654 metadata = proc.metadata 655 self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
656
657 - def _spinner_update(self):
658 if self._frozen_config.spinner: 659 self._frozen_config.spinner.update()
660
def _compute_abi_rebuild_info(self):
	"""
	Fill self._forced_rebuilds with packages that cause rebuilds.

	The result maps root -> child package -> set of parent packages
	that the child forced to rebuild (via soname or built slot-operator
	dependencies).
	"""

	debug = "--debug" in self._frozen_config.myopts
	installed_sonames = self._dynamic_config._installed_sonames
	package_tracker = self._dynamic_config._package_tracker

	# Get all atoms that might have caused a forced rebuild.
	atoms = {}
	for s in self._dynamic_config._initial_arg_list:
		if s.force_reinstall:
			root = s.root_config.root
			atoms.setdefault(root, set()).update(s.pset)

	if debug:
		writemsg_level("forced reinstall atoms:\n",
			level=logging.DEBUG, noiselevel=-1)

		for root in atoms:
			writemsg_level("  root: %s\n" % root,
				level=logging.DEBUG, noiselevel=-1)
			for atom in atoms[root]:
				writemsg_level("    atom: %s\n" % atom,
					level=logging.DEBUG, noiselevel=-1)
		writemsg_level("\n\n",
			level=logging.DEBUG, noiselevel=-1)

	# Go through all slot operator deps and check if one of these deps
	# has a parent that is matched by one of the atoms from above.
	forced_rebuilds = {}

	for root, rebuild_atoms in atoms.items():

		for slot_atom in rebuild_atoms:

			inst_pkg, reinst_pkg = \
				self._select_pkg_from_installed(root, slot_atom)

			# Nothing to do unless an installed instance is being
			# replaced by a different instance.
			if inst_pkg is reinst_pkg or reinst_pkg is None:
				continue

			if (inst_pkg is not None and
				inst_pkg.requires is not None):
				# Check soname (NEEDED) requirements of the installed
				# instance that are no longer satisfied in the graph.
				for atom in inst_pkg.requires:
					initial_providers = installed_sonames.get(
						(root, atom))
					if initial_providers is None:
						continue
					final_provider = next(
						package_tracker.match(root, atom),
						None)
					if final_provider:
						continue
					for provider in initial_providers:
						# Find the replacement child.
						child = next((pkg for pkg in
							package_tracker.match(
							root, provider.slot_atom)
							if not pkg.installed), None)

						if child is None:
							continue

						forced_rebuilds.setdefault(
							root, {}).setdefault(
							child, set()).add(inst_pkg)

			# Generate pseudo-deps for any slot-operator deps of
			# inst_pkg. Its deps aren't in _slot_operator_deps
			# because it hasn't been added to the graph, but we
			# are interested in any rebuilds that it triggered.
			built_slot_op_atoms = []
			if inst_pkg is not None:
				selected_atoms = self._select_atoms_probe(
					inst_pkg.root, inst_pkg)
				for atom in selected_atoms:
					if atom.slot_operator_built:
						built_slot_op_atoms.append(atom)

				if not built_slot_op_atoms:
					continue

			# Use a cloned list, since we may append to it below.
			deps = self._dynamic_config._slot_operator_deps.get(
				(root, slot_atom), [])[:]

			if built_slot_op_atoms and reinst_pkg is not None:
				for child in self._dynamic_config.digraph.child_nodes(
					reinst_pkg):

					if child.installed:
						continue

					for atom in built_slot_op_atoms:
						# NOTE: Since atom comes from inst_pkg, and
						# reinst_pkg is the replacement parent, there's
						# no guarantee that atom will completely match
						# child. So, simply use atom.cp and atom.slot
						# for matching.
						if atom.cp != child.cp:
							continue
						if atom.slot and atom.slot != child.slot:
							continue
						deps.append(Dependency(atom=atom, child=child,
							root=child.root, parent=reinst_pkg))

			for dep in deps:
				if dep.child.installed:
					# Find the replacement child.
					child = next((pkg for pkg in
						self._dynamic_config._package_tracker.match(
						dep.root, dep.child.slot_atom)
						if not pkg.installed), None)

					if child is None:
						continue

					inst_child = dep.child

				else:
					child = dep.child
					inst_child = self._select_pkg_from_installed(
						child.root, child.slot_atom)[0]

				# Make sure the child's slot/subslot has changed. If it
				# hasn't, then another child has forced this rebuild.
				if inst_child and inst_child.slot == child.slot and \
					inst_child.sub_slot == child.sub_slot:
					continue

				if dep.parent.installed:
					# Find the replacement parent.
					parent = next((pkg for pkg in
						self._dynamic_config._package_tracker.match(
						dep.parent.root, dep.parent.slot_atom)
						if not pkg.installed), None)

					if parent is None:
						continue

				else:
					parent = dep.parent

				# The child has forced a rebuild of the parent
				forced_rebuilds.setdefault(root, {}
					).setdefault(child, set()).add(parent)

	if debug:
		writemsg_level("slot operator dependencies:\n",
			level=logging.DEBUG, noiselevel=-1)

		for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
			writemsg_level("   (%s, %s)\n" % \
				(root, slot_atom), level=logging.DEBUG, noiselevel=-1)
			for dep in deps:
				writemsg_level("      parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
				writemsg_level("        child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)

		writemsg_level("\n\n",
			level=logging.DEBUG, noiselevel=-1)


		writemsg_level("forced rebuilds:\n",
			level=logging.DEBUG, noiselevel=-1)

		for root in forced_rebuilds:
			writemsg_level("  root: %s\n" % root,
				level=logging.DEBUG, noiselevel=-1)
			for child in forced_rebuilds[root]:
				writemsg_level("    child: %s\n" % child,
					level=logging.DEBUG, noiselevel=-1)
				for parent in forced_rebuilds[root][child]:
					writemsg_level("      parent: %s\n" % parent,
						level=logging.DEBUG, noiselevel=-1)
		writemsg_level("\n\n",
			level=logging.DEBUG, noiselevel=-1)

	self._forced_rebuilds = forced_rebuilds
841
842 - def _show_abi_rebuild_info(self):
843 844 if not self._forced_rebuilds: 845 return 846 847 writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1) 848 849 for root in self._forced_rebuilds: 850 for child in self._forced_rebuilds[root]: 851 writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1) 852 for parent in self._forced_rebuilds[root][child]: 853 writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
854
def _show_ignored_binaries(self):
    """
    Show binaries that have been ignored because their USE didn't
    match the user's config.

    Entries are pruned from self._dynamic_config.ignored_binaries when a
    better or identical package was selected for the same slot, then the
    survivors are grouped by ignore reason and dispatched to the
    reason-specific display helpers.
    """
    if not self._dynamic_config.ignored_binaries \
        or '--quiet' in self._frozen_config.myopts:
        return

    # reason -> {pkg: info}, built from the surviving entries below.
    ignored_binaries = {}

    # Iterate over a list() copy, since entries are popped from the
    # underlying dict inside the loop.
    for pkg in list(self._dynamic_config.ignored_binaries):

        for selected_pkg in self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom):

            if selected_pkg > pkg:
                # A strictly better package fills this slot, so the
                # ignored binary is not worth reporting.
                self._dynamic_config.ignored_binaries.pop(pkg)
                break

            if selected_pkg.installed and \
                selected_pkg.cpv == pkg.cpv and \
                selected_pkg.build_time == pkg.build_time:
                # We don't care about ignored binaries when an
                # identical installed instance is selected to
                # fill the slot.
                self._dynamic_config.ignored_binaries.pop(pkg)
                break

        else:
            # for/else: no matching selected package caused a break,
            # so keep this entry, grouped by its ignore reason(s).
            for reason, info in self._dynamic_config.\
                ignored_binaries[pkg].items():
                ignored_binaries.setdefault(reason, {})[pkg] = info

    # An explicit y/n setting means the user already decided; suppress
    # the corresponding advisory output.
    if self._dynamic_config.myparams.get(
        "binpkg_respect_use") in ("y", "n"):
        ignored_binaries.pop("respect_use", None)

    if self._dynamic_config.myparams.get(
        "binpkg_changed_deps") in ("y", "n"):
        ignored_binaries.pop("changed_deps", None)

    if not ignored_binaries:
        return

    self._show_merge_list()

    if ignored_binaries.get("respect_use"):
        self._show_ignored_binaries_respect_use(
            ignored_binaries["respect_use"])

    if ignored_binaries.get("changed_deps"):
        self._show_ignored_binaries_changed_deps(
            ignored_binaries["changed_deps"])
909
def _show_ignored_binaries_respect_use(self, respect_use):
    """
    Report binary packages that were skipped because their USE flags
    did not match the current configuration. Each package is printed
    as a line the user can paste into package.use.

    @param respect_use: mapping of Package -> iterable of USE flags
        that differed from the configured USE.
    """

    writemsg("\n!!! The following binary packages have been ignored " + \
        "due to non matching USE:\n\n", noiselevel=-1)

    for pkg, flags in respect_use.items():
        # Render each differing flag with package.use syntax: a "-"
        # prefix for flags that are disabled on the package.
        rendered = " ".join(
            flag if flag in pkg.use.enabled else "-" + flag
            for flag in sorted(flags))
        # The user can paste this line into package.use
        writemsg("    =%s %s" % (pkg.cpv, rendered), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n", noiselevel=-1)

    note_lines = (
        "",
        "NOTE: The --binpkg-respect-use=n option will prevent emerge",
        "      from ignoring these binary packages if possible.",
        "      Using --binpkg-respect-use=y will silence this warning."
    )

    for line in note_lines:
        if line:
            line = colorize("INFORM", line)
        writemsg(line + "\n", noiselevel=-1)
939
def _show_ignored_binaries_changed_deps(self, changed_deps):
    """
    Report binary packages that were skipped because their stored
    dependencies differ from those of the corresponding ebuild.

    @param changed_deps: iterable of ignored binary Package instances.
    """

    writemsg("\n!!! The following binary packages have been "
        "ignored due to changed dependencies:\n\n",
        noiselevel=-1)

    for pkg in changed_deps:
        entry = "     %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
        if pkg.root_config.settings["ROOT"] != "/":
            entry = "%s for %s" % (entry, pkg.root)
        writemsg("%s\n" % entry, noiselevel=-1)

    note_lines = (
        "",
        "NOTE: The --binpkg-changed-deps=n option will prevent emerge",
        "      from ignoring these binary packages if possible.",
        "      Using --binpkg-changed-deps=y will silence this warning."
    )

    for line in note_lines:
        if line:
            line = colorize("INFORM", line)
        writemsg(line + "\n", noiselevel=-1)
963
def _get_missed_updates(self):
    """
    Collect updates that were masked or lost to conflicts, keeping only
    the highest missed update per (root, slot) to minimize noise.

    @rtype: dict
    @return: {(root, slot_atom): (pkg, mask_type, parent_atoms)}
    """

    # In order to minimize noise, show only the highest
    # missed update from each SLOT.
    missed_updates = {}
    for pkg, mask_reasons in \
        chain(self._dynamic_config._runtime_pkg_mask.items(),
            self._dynamic_config._conflict_missed_update.items()):
        if pkg.installed:
            # Exclude installed here since we only
            # want to show available updates.
            continue
        missed_update = True
        any_selected = False
        for chosen_pkg in self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom):
            any_selected = True
            # Not a missed update if something at least as good (or a
            # to-be-built package of the same version) fills the slot.
            if chosen_pkg > pkg or (not chosen_pkg.installed and \
                chosen_pkg.version == pkg.version):
                missed_update = False
                break
        if any_selected and missed_update:
            k = (pkg.root, pkg.slot_atom)
            if k in missed_updates:
                other_pkg, mask_type, parent_atoms = missed_updates[k]
                # Keep the already-recorded entry if it is higher.
                if other_pkg > pkg:
                    continue
            # Record the first mask reason that has parent atoms.
            for mask_type, parent_atoms in mask_reasons.items():
                if not parent_atoms:
                    continue
                missed_updates[k] = (pkg, mask_type, parent_atoms)
                break

    return missed_updates
998
999 - def _show_missed_update(self):
1000 1001 missed_updates = self._get_missed_updates() 1002 1003 if not missed_updates: 1004 return 1005 1006 missed_update_types = {} 1007 for pkg, mask_type, parent_atoms in missed_updates.values(): 1008 missed_update_types.setdefault(mask_type, 1009 []).append((pkg, parent_atoms)) 1010 1011 if '--quiet' in self._frozen_config.myopts and \ 1012 '--debug' not in self._frozen_config.myopts: 1013 missed_update_types.pop("slot conflict", None) 1014 missed_update_types.pop("missing dependency", None) 1015 1016 self._show_missed_update_slot_conflicts( 1017 missed_update_types.get("slot conflict")) 1018 1019 self._show_missed_update_unsatisfied_dep( 1020 missed_update_types.get("missing dependency"))
1021
def _show_missed_update_unsatisfied_dep(self, missed_updates):
    """
    Display updates that were skipped because one of their dependencies
    could not be satisfied. Updates whose mask messages were triggered
    by backtracking are collected and shown in abbreviated form.

    @param missed_updates: list of (pkg, parent_atoms) pairs, where
        parent_atoms is an iterable of (parent, root, atom) triples.
    """

    if not missed_updates:
        return

    self._show_merge_list()
    backtrack_masked = []

    for pkg, parent_atoms in missed_updates:

        try:
            # check_backtrack=True makes _show_unsatisfied_dep raise
            # self._backtrack_mask instead of printing, when the mask
            # was caused by backtracking.
            for parent, root, atom in parent_atoms:
                self._show_unsatisfied_dep(root, atom, myparent=parent,
                    check_backtrack=True)
        except self._backtrack_mask:
            # This is displayed below in abbreviated form.
            backtrack_masked.append((pkg, parent_atoms))
            continue

        writemsg("\n!!! The following update has been skipped " + \
            "due to unsatisfied dependencies:\n\n", noiselevel=-1)

        writemsg(str(pkg.slot_atom), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n", noiselevel=-1)

        # Second pass actually prints the full unsatisfied-dep output.
        for parent, root, atom in parent_atoms:
            self._show_unsatisfied_dep(root, atom, myparent=parent)
            writemsg("\n", noiselevel=-1)

    if backtrack_masked:
        # These are shown in abbreviated form, in order to avoid terminal
        # flooding from mask messages as reported in bug #285832.
        writemsg("\n!!! The following update(s) have been skipped " + \
            "due to unsatisfied dependencies\n" + \
            "!!! triggered by backtracking:\n\n", noiselevel=-1)
        for pkg, parent_atoms in backtrack_masked:
            writemsg(str(pkg.slot_atom), noiselevel=-1)
            if pkg.root_config.settings["ROOT"] != "/":
                writemsg(" for %s" % (pkg.root,), noiselevel=-1)
            writemsg("\n", noiselevel=-1)
1064
def _show_missed_update_slot_conflicts(self, missed_updates):
    """
    Display updates/rebuilds that were skipped because of a slot
    dependency conflict, showing which parent atoms conflict with the
    skipped package.

    @param missed_updates: list of (pkg, parent_atoms) pairs, where
        parent_atoms is an iterable of (parent, atom) pairs.
    """

    if not missed_updates:
        return

    self._show_merge_list()
    msg = []
    msg.append("\nWARNING: One or more updates/rebuilds have been " + \
        "skipped due to a dependency conflict:\n\n")

    indent = "  "
    for pkg, parent_atoms in missed_updates:
        msg.append(str(pkg.slot_atom))
        if pkg.root_config.settings["ROOT"] != "/":
            msg.append(" for %s" % (pkg.root,))
        msg.append("\n\n")

        msg.append(indent)
        msg.append(str(pkg))
        msg.append(" conflicts with\n")

        for parent, atom in parent_atoms:
            if isinstance(parent,
                (PackageArg, AtomArg)):
                # For PackageArg and AtomArg types, it's
                # redundant to display the atom attribute.
                msg.append(2*indent)
                msg.append(str(parent))
                msg.append("\n")
            else:
                # Display the specific atom from SetArg or
                # Package types. Note: atom is intentionally
                # rebound to the highlighted rendering here.
                atom, marker = format_unmatched_atom(
                    pkg, atom, self._pkg_use_enabled)

                msg.append(2*indent)
                msg.append("%s required by %s\n" % (atom, parent))
                msg.append(2*indent)
                msg.append(marker)
                msg.append("\n")
        msg.append("\n")

    writemsg("".join(msg), noiselevel=-1)
1108
1110 """Show an informational message advising the user to mask one of the 1111 the packages. In some cases it may be possible to resolve this 1112 automatically, but support for backtracking (removal nodes that have 1113 already been selected) will be required in order to handle all possible 1114 cases. 1115 """ 1116 1117 if not any(self._dynamic_config._package_tracker.slot_conflicts()): 1118 return 1119 1120 self._show_merge_list() 1121 1122 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self) 1123 handler = self._dynamic_config._slot_conflict_handler 1124 1125 conflict = handler.get_conflict() 1126 writemsg(conflict, noiselevel=-1) 1127 1128 explanation = handler.get_explanation() 1129 if explanation: 1130 writemsg(explanation, noiselevel=-1) 1131 return 1132 1133 if "--quiet" in self._frozen_config.myopts: 1134 return 1135 1136 msg = [] 1137 msg.append("It may be possible to solve this problem ") 1138 msg.append("by using package.mask to prevent one of ") 1139 msg.append("those packages from being selected. ") 1140 msg.append("However, it is also possible that conflicting ") 1141 msg.append("dependencies exist such that they are impossible to ") 1142 msg.append("satisfy simultaneously. 
If such a conflict exists in ") 1143 msg.append("the dependencies of two different packages, then those ") 1144 msg.append("packages can not be installed simultaneously.") 1145 backtrack_opt = self._frozen_config.myopts.get('--backtrack') 1146 if not self._dynamic_config._allow_backtracking and \ 1147 (backtrack_opt is None or \ 1148 (backtrack_opt > 0 and backtrack_opt < 30)): 1149 msg.append(" You may want to try a larger value of the ") 1150 msg.append("--backtrack option, such as --backtrack=30, ") 1151 msg.append("in order to see if that will solve this conflict ") 1152 msg.append("automatically.") 1153 1154 for line in textwrap.wrap(''.join(msg), 70): 1155 writemsg(line + '\n', noiselevel=-1) 1156 writemsg('\n', noiselevel=-1) 1157 1158 msg = [] 1159 msg.append("For more information, see MASKED PACKAGES ") 1160 msg.append("section in the emerge man page or refer ") 1161 msg.append("to the Gentoo Handbook.") 1162 for line in textwrap.wrap(''.join(msg), 70): 1163 writemsg(line + '\n', noiselevel=-1) 1164 writemsg('\n', noiselevel=-1)
1165
1167 """ 1168 This function solves slot conflicts which can 1169 be solved by simply choosing one of the conflicting 1170 and removing all the other ones. 1171 It is able to solve somewhat more complex cases where 1172 conflicts can only be solved simultaniously. 1173 """ 1174 debug = "--debug" in self._frozen_config.myopts 1175 1176 # List all conflicts. Ignore those that involve slot operator rebuilds 1177 # as the logic there needs special slot conflict behavior which isn't 1178 # provided by this function. 1179 conflicts = [] 1180 for conflict in self._dynamic_config._package_tracker.slot_conflicts(): 1181 slot_key = conflict.root, conflict.atom 1182 if slot_key not in self._dynamic_config._slot_operator_replace_installed: 1183 conflicts.append(conflict) 1184 1185 if not conflicts: 1186 return 1187 1188 if debug: 1189 writemsg_level( 1190 "\n!!! Slot conflict handler started.\n", 1191 level=logging.DEBUG, noiselevel=-1) 1192 1193 # Get a set of all conflicting packages. 1194 conflict_pkgs = set() 1195 for conflict in conflicts: 1196 conflict_pkgs.update(conflict) 1197 1198 # Get the list of other packages which are only 1199 # required by conflict packages. 
1200 indirect_conflict_candidates = set() 1201 for pkg in conflict_pkgs: 1202 indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg)) 1203 indirect_conflict_candidates.difference_update(conflict_pkgs) 1204 1205 indirect_conflict_pkgs = set() 1206 while indirect_conflict_candidates: 1207 pkg = indirect_conflict_candidates.pop() 1208 1209 only_conflict_parents = True 1210 for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): 1211 if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs: 1212 only_conflict_parents = False 1213 break 1214 if not only_conflict_parents: 1215 continue 1216 1217 indirect_conflict_pkgs.add(pkg) 1218 for child in self._dynamic_config.digraph.child_nodes(pkg): 1219 if child in conflict_pkgs or child in indirect_conflict_pkgs: 1220 continue 1221 indirect_conflict_candidates.add(child) 1222 1223 # Create a graph containing the conflict packages 1224 # and a special 'non_conflict_node' that represents 1225 # all non-conflict packages. 1226 conflict_graph = digraph() 1227 1228 non_conflict_node = "(non-conflict package)" 1229 conflict_graph.add(non_conflict_node, None) 1230 1231 for pkg in chain(conflict_pkgs, indirect_conflict_pkgs): 1232 conflict_graph.add(pkg, None) 1233 1234 # Add parent->child edges for each conflict package. 1235 # Parents, which aren't conflict packages are represented 1236 # by 'non_conflict_node'. 1237 # If several conflicting packages are matched, but not all, 1238 # add a tuple with the matched packages to the graph. 1239 class or_tuple(tuple): 1240 """ 1241 Helper class for debug printing. 1242 """ 1243 def __str__(self): 1244 return "(%s)" % ",".join(str(pkg) for pkg in self)
1245 1246 non_matching_forced = set() 1247 for conflict in conflicts: 1248 if debug: 1249 writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1) 1250 writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1) 1251 writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1) 1252 for pkg in conflict: 1253 writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1) 1254 1255 all_parent_atoms = set() 1256 highest_pkg = None 1257 inst_pkg = None 1258 for pkg in conflict: 1259 if pkg.installed: 1260 inst_pkg = pkg 1261 if highest_pkg is None or highest_pkg < pkg: 1262 highest_pkg = pkg 1263 all_parent_atoms.update( 1264 self._dynamic_config._parent_atoms.get(pkg, [])) 1265 1266 for parent, atom in all_parent_atoms: 1267 is_arg_parent = isinstance(parent, AtomArg) 1268 is_non_conflict_parent = parent not in conflict_pkgs and \ 1269 parent not in indirect_conflict_pkgs 1270 1271 if debug: 1272 writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1) 1273 writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent), 1274 level=logging.DEBUG, noiselevel=-1) 1275 writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1) 1276 1277 if is_non_conflict_parent: 1278 parent = non_conflict_node 1279 1280 matched = [] 1281 for pkg in conflict: 1282 if (pkg is highest_pkg and 1283 not highest_pkg.installed and 1284 inst_pkg is not None and 1285 inst_pkg.sub_slot != highest_pkg.sub_slot and 1286 not self._downgrade_probe(highest_pkg)): 1287 # If an upgrade is desired, force the highest 1288 # version into the graph (bug #531656). 
1289 non_matching_forced.add(highest_pkg) 1290 1291 if atom.match(pkg.with_use( 1292 self._pkg_use_enabled(pkg))) and \ 1293 not (is_arg_parent and pkg.installed): 1294 matched.append(pkg) 1295 1296 if debug: 1297 for match in matched: 1298 writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1) 1299 1300 if len(matched) > 1: 1301 # Even if all packages match, this parent must still 1302 # be added to the conflict_graph. Otherwise, we risk 1303 # removing all of these packages from the depgraph, 1304 # which could cause a missed update (bug #522084). 1305 conflict_graph.add(or_tuple(matched), parent) 1306 elif len(matched) == 1: 1307 conflict_graph.add(matched[0], parent) 1308 else: 1309 # This typically means that autounmask broke a 1310 # USE-dep, but it could also be due to the slot 1311 # not matching due to multislot (bug #220341). 1312 # Either way, don't try to solve this conflict. 1313 # Instead, force them all into the graph so that 1314 # they are protected from removal. 1315 non_matching_forced.update(conflict) 1316 if debug: 1317 for pkg in conflict: 1318 writemsg_level(" non-match: %s\n" % pkg, 1319 level=logging.DEBUG, noiselevel=-1) 1320 1321 for pkg in indirect_conflict_pkgs: 1322 for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): 1323 if parent not in conflict_pkgs and \ 1324 parent not in indirect_conflict_pkgs: 1325 parent = non_conflict_node 1326 conflict_graph.add(pkg, parent) 1327 1328 if debug: 1329 writemsg_level( 1330 "\n!!! Slot conflict graph:\n", 1331 level=logging.DEBUG, noiselevel=-1) 1332 conflict_graph.debug_print() 1333 1334 # Now select required packages. Collect them in the 1335 # 'forced' set. 1336 forced = set([non_conflict_node]) 1337 forced.update(non_matching_forced) 1338 unexplored = set([non_conflict_node]) 1339 # or_tuples get special handling. We first explore 1340 # all packages in the hope of having forced one of 1341 # the packages in the tuple. 
This way we don't have 1342 # to choose one. 1343 unexplored_tuples = set() 1344 explored_nodes = set() 1345 1346 while unexplored: 1347 # Handle all unexplored packages. 1348 while unexplored: 1349 node = unexplored.pop() 1350 for child in conflict_graph.child_nodes(node): 1351 # Don't explore a node more than once, in order 1352 # to avoid infinite recursion. The forced set 1353 # cannot be used for this purpose, since it can 1354 # contain unexplored nodes from non_matching_forced. 1355 if child in explored_nodes: 1356 continue 1357 explored_nodes.add(child) 1358 forced.add(child) 1359 if isinstance(child, Package): 1360 unexplored.add(child) 1361 else: 1362 unexplored_tuples.add(child) 1363 1364 # Now handle unexplored or_tuples. Move on with packages 1365 # once we had to choose one. 1366 while unexplored_tuples: 1367 nodes = unexplored_tuples.pop() 1368 if any(node in forced for node in nodes): 1369 # At least one of the packages in the 1370 # tuple is already forced, which means the 1371 # dependency represented by this tuple 1372 # is satisfied. 1373 continue 1374 1375 # We now have to choose one of packages in the tuple. 1376 # In theory one could solve more conflicts if we'd be 1377 # able to try different choices here, but that has lots 1378 # of other problems. For now choose the package that was 1379 # pulled first, as this should be the most desirable choice 1380 # (otherwise it wouldn't have been the first one). 1381 forced.add(nodes[0]) 1382 unexplored.add(nodes[0]) 1383 break 1384 1385 # Remove 'non_conflict_node' and or_tuples from 'forced'. 1386 forced = set(pkg for pkg in forced if isinstance(pkg, Package)) 1387 non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced) 1388 1389 if debug: 1390 writemsg_level( 1391 "\n!!! 
Slot conflict solution:\n", 1392 level=logging.DEBUG, noiselevel=-1) 1393 for conflict in conflicts: 1394 writemsg_level( 1395 " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom), 1396 level=logging.DEBUG, noiselevel=-1) 1397 for pkg in conflict: 1398 if pkg in forced: 1399 writemsg_level( 1400 " keep: %s\n" % pkg, 1401 level=logging.DEBUG, noiselevel=-1) 1402 else: 1403 writemsg_level( 1404 " remove: %s\n" % pkg, 1405 level=logging.DEBUG, noiselevel=-1) 1406 1407 broken_packages = set() 1408 for pkg in non_forced: 1409 for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): 1410 if isinstance(parent, Package) and parent not in non_forced: 1411 # Non-forcing set args are expected to be a parent of all 1412 # packages in the conflict. 1413 broken_packages.add(parent) 1414 self._remove_pkg(pkg) 1415 1416 # Process the dependencies of choosen conflict packages 1417 # again to properly account for blockers. 1418 broken_packages.update(forced) 1419 1420 # Filter out broken packages which have been removed during 1421 # recursive removal in self._remove_pkg. 1422 broken_packages = list(pkg for pkg in broken_packages if pkg in broken_packages \ 1423 if self._dynamic_config._package_tracker.contains(pkg, installed=False)) 1424 1425 self._dynamic_config._dep_stack.extend(broken_packages) 1426 1427 if broken_packages: 1428 # Process dependencies. This cannot fail because we just ensured that 1429 # the remaining packages satisfy all dependencies. 1430 self._create_graph() 1431 1432 # Record missed updates. 
1433 for conflict in conflicts: 1434 if not any(pkg in non_forced for pkg in conflict): 1435 continue 1436 for pkg in conflict: 1437 if pkg not in non_forced: 1438 continue 1439 1440 for other in conflict: 1441 if other is pkg: 1442 continue 1443 1444 for parent, atom in self._dynamic_config._parent_atoms.get(other, []): 1445 atom_set = InternalPackageSet( 1446 initial_atoms=(atom,), allow_repo=True) 1447 if not atom_set.findAtomForPackage(pkg, 1448 modified_use=self._pkg_use_enabled(pkg)): 1449 self._dynamic_config._conflict_missed_update[pkg].setdefault( 1450 "slot conflict", set()) 1451 self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add( 1452 (parent, atom)) 1453 1454
1455 - def _process_slot_conflicts(self):
1456 """ 1457 If there are any slot conflicts and backtracking is enabled, 1458 _complete_graph should complete the graph before this method 1459 is called, so that all relevant reverse dependencies are 1460 available for use in backtracking decisions. 1461 """ 1462 1463 self._solve_non_slot_operator_slot_conflicts() 1464 1465 for conflict in self._dynamic_config._package_tracker.slot_conflicts(): 1466 self._process_slot_conflict(conflict)
1467
def _process_slot_conflict(self, conflict):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.

    @param conflict: a slot conflict record providing root, atom and
        pkgs attributes for the conflicting slot.
    """
    root = conflict.root
    slot_atom = conflict.atom
    slot_nodes = conflict.pkgs

    debug = "--debug" in self._frozen_config.myopts

    # Union of all (parent, atom) pairs across the conflicting packages.
    slot_parent_atoms = set()
    for pkg in slot_nodes:
        parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
        if not parent_atoms:
            continue
        slot_parent_atoms.update(parent_atoms)

    conflict_pkgs = []
    # (parent, atom) -> set of packages that the atom does NOT match.
    conflict_atoms = {}
    for pkg in slot_nodes:

        if self._dynamic_config._allow_backtracking and \
            pkg in self._dynamic_config._runtime_pkg_mask:
            if debug:
                writemsg_level(
                    "!!! backtracking loop detected: %s %s\n" % \
                    (pkg,
                    self._dynamic_config._runtime_pkg_mask[pkg]),
                    level=logging.DEBUG, noiselevel=-1)

        parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
        if parent_atoms is None:
            parent_atoms = set()
            self._dynamic_config._parent_atoms[pkg] = parent_atoms

        # Try to attribute every parent atom of the slot to this
        # package; atoms that do not match are recorded as conflicting.
        all_match = True
        for parent_atom in slot_parent_atoms:
            if parent_atom in parent_atoms:
                continue
            parent, atom = parent_atom
            if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
                # NOTE: this mutates the shared _parent_atoms mapping.
                parent_atoms.add(parent_atom)
            else:
                all_match = False
                conflict_atoms.setdefault(parent_atom, set()).add(pkg)

        if not all_match:
            conflict_pkgs.append(pkg)

    if conflict_pkgs and \
        self._dynamic_config._allow_backtracking and \
        not self._accept_blocker_conflicts():
        remaining = []
        for pkg in conflict_pkgs:
            if self._slot_conflict_backtrack_abi(pkg,
                slot_nodes, conflict_atoms):
                # A slot-operator rebuild resolves this package's
                # conflict; record it for the backtracker.
                backtrack_infos = self._dynamic_config._backtrack_infos
                config = backtrack_infos.setdefault("config", {})
                config.setdefault("slot_conflict_abi", set()).add(pkg)
            else:
                remaining.append(pkg)
        if remaining:
            self._slot_confict_backtrack(root, slot_atom,
                slot_parent_atoms, remaining)
1534
def _slot_confict_backtrack(self, root, slot_atom,
    all_parents, conflict_pkgs):
    """
    Schedule backtracking for a slot conflict by recording, for each
    conflicting package, the parent atoms that it fails to satisfy.

    NOTE: the method name's "confict" spelling is a historical typo
    that is preserved because callers use this exact name.

    @param root: the root of the conflicting slot.
    @param slot_atom: the slot atom of the conflict.
    @param all_parents: set of (parent, atom) pairs for the slot.
    @param conflict_pkgs: list of packages to consider masking.
    """

    debug = "--debug" in self._frozen_config.myopts
    existing_node = next(self._dynamic_config._package_tracker.match(
        root, slot_atom, installed=False))
    # In order to avoid a missed update, first mask lower versions
    # that conflict with higher versions (the backtracker visits
    # these in reverse order).
    conflict_pkgs.sort(reverse=True)
    backtrack_data = []
    for to_be_masked in conflict_pkgs:
        # For missed update messages, find out which
        # atoms matched to_be_selected that did not
        # match to_be_masked.
        parent_atoms = \
            self._dynamic_config._parent_atoms.get(to_be_masked, set())
        conflict_atoms = set(parent_atom for parent_atom in all_parents \
            if parent_atom not in parent_atoms)
        backtrack_data.append((to_be_masked, conflict_atoms))

    # The last entry (lowest version) is reported in the debug output.
    to_be_masked = backtrack_data[-1][0]

    self._dynamic_config._backtrack_infos.setdefault(
        "slot conflict", []).append(backtrack_data)
    self._dynamic_config._need_restart = True
    if debug:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("backtracking due to slot conflict:")
        msg.append("   first package: %s" % existing_node)
        msg.append("   package to mask: %s" % to_be_masked)
        msg.append("   slot: %s" % slot_atom)
        msg.append("   parents: %s" % ", ".join( \
            "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
        msg.append("")
        writemsg_level("".join("%s\n" % l for l in msg),
            noiselevel=-1, level=logging.DEBUG)
1574
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
    """
    If one or more conflict atoms have a slot/sub-slot dep that can be resolved
    by rebuilding the parent package, then schedule the rebuild via
    backtracking, and return True. Otherwise, return False.

    @param pkg: the conflicting package being considered for masking.
    @param slot_nodes: all packages pulled into the conflicting slot.
    @param conflict_atoms: {(parent, atom): set of non-matching pkgs}.
    @rtype: bool
    """

    found_update = False
    for parent_atom, conflict_pkgs in conflict_atoms.items():
        parent, atom = parent_atom

        # Only built Package parents with soname or built slot-operator
        # atoms can be resolved by a rebuild.
        if not isinstance(parent, Package):
            continue

        if not parent.built:
            continue

        if not atom.soname and not (
            atom.package and atom.slot_operator_built):
            continue

        if pkg not in conflict_pkgs:
            continue

        for other_pkg in slot_nodes:
            # Only probe against the slot candidates that this
            # atom does match.
            if other_pkg in conflict_pkgs:
                continue

            dep = Dependency(atom=atom, child=other_pkg,
                parent=parent, root=pkg.root)

            new_dep = \
                self._slot_operator_update_probe_slot_conflict(dep)
            if new_dep is not None:
                self._slot_operator_update_backtrack(dep,
                    new_dep=new_dep)
                found_update = True

    return found_update
1614
def _slot_change_probe(self, dep):
    """
    @rtype: Package or None
    @return: the matching unbuilt ebuild if dep.child should be rebuilt
        due to a change in sub-slot (without revbump, as in bug #456208),
        otherwise None. (The original docstring declared bool, but the
        code returns a Package instance or None.)
    """
    # Only applies when an unbuilt parent depends on a built child.
    if not (isinstance(dep.parent, Package) and \
        not dep.parent.built and dep.child.built):
        return None

    root_config = self._frozen_config.roots[dep.root]
    matches = []
    try:
        # Prefer the ebuild from the same repo as the built child.
        matches.append(self._pkg(dep.child.cpv, "ebuild",
            root_config, myrepo=dep.child.repo))
    except PackageNotFound:
        pass

    # Find the first usable (unmasked, not excluded, visible) ebuild
    # with the same cpv as the built child.
    for unbuilt_child in chain(matches,
        self._iter_match_pkgs(root_config, "ebuild",
        Atom("=%s" % (dep.child.cpv,)))):
        if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
            continue
        if self._frozen_config.excluded_pkgs.findAtomForPackage(
            unbuilt_child,
            modified_use=self._pkg_use_enabled(unbuilt_child)):
            continue
        if not self._pkg_visibility_check(unbuilt_child):
            continue
        break
    else:
        # for/else: no usable ebuild found.
        return None

    # No rebuild needed if slot/sub-slot are unchanged.
    if unbuilt_child.slot == dep.child.slot and \
        unbuilt_child.sub_slot == dep.child.sub_slot:
        return None

    return unbuilt_child
1653
def _slot_change_backtrack(self, dep, new_child_slot):
    """
    Schedule backtracking so that dep.child is replaced by
    new_child_slot (an ebuild with a different slot/sub-slot, as
    detected by _slot_change_probe).

    @param dep: the Dependency whose child changed slot/sub-slot.
    @param new_child_slot: the replacement (unbuilt) package.
    """
    child = dep.child
    if "--debug" in self._frozen_config.myopts:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("backtracking due to slot/sub-slot change:")
        msg.append("   child package: %s" % child)
        msg.append("   child slot: %s/%s" %
            (child.slot, child.sub_slot))
        msg.append("   new child: %s" % new_child_slot)
        msg.append("   new child slot: %s/%s" %
            (new_child_slot.slot, new_child_slot.sub_slot))
        msg.append("   parent package: %s" % dep.parent)
        msg.append("   atom: %s" % dep.atom)
        msg.append("")
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
    backtrack_infos = self._dynamic_config._backtrack_infos
    config = backtrack_infos.setdefault("config", {})

    # mask unwanted binary packages if necessary
    masks = {}
    if not child.installed:
        masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
    if masks:
        config.setdefault("slot_operator_mask_built", {}).update(masks)

    # trigger replacement of installed packages if necessary
    reinstalls = set()
    if child.installed:
        replacement_atom = self._replace_installed_atom(child)
        if replacement_atom is not None:
            reinstalls.add((child.root, replacement_atom))
    if reinstalls:
        config.setdefault("slot_operator_replace_installed",
            set()).update(reinstalls)

    self._dynamic_config._need_restart = True
1693
def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
    new_dep=None):
    """
    Schedule backtracking for a missed slot/ABI update: mask unwanted
    binary packages and/or trigger replacement of installed packages,
    then request a restart of dependency calculation.

    @param dep: the Dependency being updated.
    @param new_child_slot: replacement child from a different slot,
        or None to keep dep.child.
    @param new_dep: replacement dependency whose parent should be
        reinstalled, or None.
    """
    if new_child_slot is None:
        child = dep.child
    else:
        child = new_child_slot
    if "--debug" in self._frozen_config.myopts:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("backtracking due to missed slot abi update:")
        msg.append("   child package: %s" % child)
        if new_child_slot is not None:
            msg.append("   new child slot package: %s" % new_child_slot)
        msg.append("   parent package: %s" % dep.parent)
        if new_dep is not None:
            msg.append("   new parent pkg: %s" % new_dep.parent)
        msg.append("   atom: %s" % dep.atom)
        msg.append("")
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
    backtrack_infos = self._dynamic_config._backtrack_infos
    config = backtrack_infos.setdefault("config", {})

    # mask unwanted binary packages if necessary
    abi_masks = {}
    if new_child_slot is None:
        if not child.installed:
            abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
    if not dep.parent.installed:
        abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
    if abi_masks:
        config.setdefault("slot_operator_mask_built", {}).update(abi_masks)

    # trigger replacement of installed packages if necessary
    abi_reinstalls = set()
    if dep.parent.installed:
        if new_dep is not None:
            replacement_atom = new_dep.parent.slot_atom
        else:
            replacement_atom = self._replace_installed_atom(dep.parent)
        if replacement_atom is not None:
            abi_reinstalls.add((dep.parent.root, replacement_atom))
    if new_child_slot is None and child.installed:
        replacement_atom = self._replace_installed_atom(child)
        if replacement_atom is not None:
            abi_reinstalls.add((child.root, replacement_atom))
    if abi_reinstalls:
        config.setdefault("slot_operator_replace_installed",
            set()).update(abi_reinstalls)

    self._dynamic_config._need_restart = True
1746
1747 - def _slot_operator_update_probe_slot_conflict(self, dep):
1748 new_dep = self._slot_operator_update_probe(dep, slot_conflict=True) 1749 1750 if new_dep is not None: 1751 return new_dep 1752 1753 if self._dynamic_config._autounmask is True: 1754 1755 for autounmask_level in self._autounmask_levels(): 1756 1757 new_dep = self._slot_operator_update_probe(dep, 1758 slot_conflict=True, autounmask_level=autounmask_level) 1759 1760 if new_dep is not None: 1761 return new_dep 1762 1763 return None
1764
1765 - def _slot_operator_update_probe(self, dep, new_child_slot=False, 1766 slot_conflict=False, autounmask_level=None):
1767 """ 1768 slot/sub-slot := operators tend to prevent updates from getting pulled in, 1769 since installed packages pull in packages with the slot/sub-slot that they 1770 were built against. Detect this case so that we can schedule rebuilds 1771 and reinstalls when appropriate. 1772 NOTE: This function only searches for updates that involve upgrades 1773 to higher versions, since the logic required to detect when a 1774 downgrade would be desirable is not implemented. 1775 """ 1776 1777 if dep.child.installed and \ 1778 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child, 1779 modified_use=self._pkg_use_enabled(dep.child)): 1780 return None 1781 1782 if dep.parent.installed and \ 1783 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent, 1784 modified_use=self._pkg_use_enabled(dep.parent)): 1785 return None 1786 1787 debug = "--debug" in self._frozen_config.myopts 1788 selective = "selective" in self._dynamic_config.myparams 1789 want_downgrade = None 1790 want_downgrade_parent = None 1791 1792 def check_reverse_dependencies(existing_pkg, candidate_pkg, 1793 replacement_parent=None): 1794 """ 1795 Check if candidate_pkg satisfies all of existing_pkg's non- 1796 slot operator parents. 1797 """ 1798 built_slot_operator_parents = set() 1799 for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []): 1800 if atom.soname or atom.slot_operator_built: 1801 built_slot_operator_parents.add(parent) 1802 1803 for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []): 1804 if isinstance(parent, Package): 1805 if parent in built_slot_operator_parents: 1806 # This parent may need to be rebuilt, so its 1807 # dependencies aren't necessarily relevant. 1808 continue 1809 1810 if replacement_parent is not None and \ 1811 (replacement_parent.slot_atom == parent.slot_atom 1812 or replacement_parent.cpv == parent.cpv): 1813 # This parent is irrelevant because we intend to 1814 # replace it with replacement_parent. 
1815 continue 1816 1817 if any(pkg is not parent and 1818 (pkg.slot_atom == parent.slot_atom or 1819 pkg.cpv == parent.cpv) for pkg in 1820 self._dynamic_config._package_tracker.match( 1821 parent.root, Atom(parent.cp))): 1822 # This parent may need to be eliminated due to a 1823 # slot conflict, so its dependencies aren't 1824 # necessarily relevant. 1825 continue 1826 1827 if (not self._too_deep(parent.depth) and 1828 not self._frozen_config.excluded_pkgs. 1829 findAtomForPackage(parent, 1830 modified_use=self._pkg_use_enabled(parent)) and 1831 self._upgrade_available(parent)): 1832 # This parent may be irrelevant, since an 1833 # update is available (see bug 584626). 1834 continue 1835 1836 atom_set = InternalPackageSet(initial_atoms=(atom,), 1837 allow_repo=True) 1838 if not atom_set.findAtomForPackage(candidate_pkg, 1839 modified_use=self._pkg_use_enabled(candidate_pkg)): 1840 return False 1841 return True
1842 1843 1844 for replacement_parent in self._iter_similar_available(dep.parent, 1845 dep.parent.slot_atom, autounmask_level=autounmask_level): 1846 1847 if replacement_parent is dep.parent: 1848 continue 1849 1850 if replacement_parent < dep.parent: 1851 if want_downgrade_parent is None: 1852 want_downgrade_parent = self._downgrade_probe( 1853 dep.parent) 1854 if not want_downgrade_parent: 1855 continue 1856 1857 if not check_reverse_dependencies(dep.parent, replacement_parent): 1858 continue 1859 1860 selected_atoms = None 1861 1862 try: 1863 atoms = self._flatten_atoms(replacement_parent, 1864 self._pkg_use_enabled(replacement_parent)) 1865 except InvalidDependString: 1866 continue 1867 1868 if replacement_parent.requires is not None: 1869 atoms = list(atoms) 1870 atoms.extend(replacement_parent.requires) 1871 1872 # List of list of child,atom pairs for each atom. 1873 replacement_candidates = [] 1874 # Set of all packages all atoms can agree on. 1875 all_candidate_pkgs = None 1876 1877 for atom in atoms: 1878 atom_not_selected = False 1879 1880 if not atom.package: 1881 unevaluated_atom = None 1882 if atom.match(dep.child): 1883 # We are searching for a replacement_parent 1884 # atom that will pull in a different child, 1885 # so continue checking the rest of the atoms. 1886 continue 1887 else: 1888 1889 if atom.blocker or \ 1890 atom.cp != dep.child.cp: 1891 continue 1892 1893 # Discard USE deps, we're only searching for an 1894 # approximate pattern, and dealing with USE states 1895 # is too complex for this purpose. 1896 unevaluated_atom = atom.unevaluated_atom 1897 atom = atom.without_use 1898 1899 if replacement_parent.built and \ 1900 portage.dep._match_slot(atom, dep.child): 1901 # We are searching for a replacement_parent 1902 # atom that will pull in a different child, 1903 # so continue checking the rest of the atoms. 
1904 continue 1905 1906 candidate_pkg_atoms = [] 1907 candidate_pkgs = [] 1908 for pkg in self._iter_similar_available( 1909 dep.child, atom): 1910 if (dep.atom.package and 1911 pkg.slot == dep.child.slot and 1912 pkg.sub_slot == dep.child.sub_slot)