Package _emerge :: Module depgraph
[hide private]

Source Code for Module _emerge.depgraph

   1  # Copyright 1999-2017 Gentoo Foundation 
   2  # Distributed under the terms of the GNU General Public License v2 
   3   
   4  from __future__ import division, print_function, unicode_literals 
   5   
   6  import collections 
   7  import errno 
   8  import functools 
   9  import io 
  10  import logging 
  11  import stat 
  12  import sys 
  13  import textwrap 
  14  import warnings 
  15  from collections import deque 
  16  from itertools import chain 
  17   
  18  import portage 
  19  from portage import os, OrderedDict 
  20  from portage import _unicode_decode, _unicode_encode, _encodings 
  21  from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS 
  22  from portage.dbapi import dbapi 
  23  from portage.dbapi.dep_expand import dep_expand 
  24  from portage.dbapi.DummyTree import DummyTree 
  25  from portage.dbapi.IndexedPortdb import IndexedPortdb 
  26  from portage.dbapi._similar_name_search import similar_name_search 
  27  from portage.dep import Atom, best_match_to_list, extract_affecting_use, \ 
  28          check_required_use, human_readable_required_use, match_from_list, \ 
  29          _repo_separator 
  30  from portage.dep._slot_operator import (ignore_built_slot_operator_deps, 
  31          strip_slots) 
  32  from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \ 
  33          _get_eapi_attrs 
  34  from portage.exception import (InvalidAtom, InvalidData, InvalidDependString, 
  35          PackageNotFound, PortageException) 
  36  from portage.localization import _ 
  37  from portage.output import colorize, create_color_func, \ 
  38          darkgreen, green 
  39  bad = create_color_func("BAD") 
  40  from portage.package.ebuild.config import _get_feature_flags 
  41  from portage.package.ebuild.getmaskingstatus import \ 
  42          _getmaskingstatus, _MaskReason 
  43  from portage._sets import SETPREFIX 
  44  from portage._sets.base import InternalPackageSet 
  45  from portage.util import ConfigProtect, shlex_split, new_protect_filename 
  46  from portage.util import cmp_sort_key, writemsg, writemsg_stdout 
  47  from portage.util import ensure_dirs 
  48  from portage.util import writemsg_level, write_atomic 
  49  from portage.util.digraph import digraph 
  50  from portage.util._async.TaskScheduler import TaskScheduler 
  51  from portage.util._eventloop.EventLoop import EventLoop 
  52  from portage.util._eventloop.global_event_loop import global_event_loop 
  53  from portage.versions import catpkgsplit 
  54   
  55  from _emerge.AtomArg import AtomArg 
  56  from _emerge.Blocker import Blocker 
  57  from _emerge.BlockerCache import BlockerCache 
  58  from _emerge.BlockerDepPriority import BlockerDepPriority 
  59  from .chk_updated_cfg_files import chk_updated_cfg_files 
  60  from _emerge.countdown import countdown 
  61  from _emerge.create_world_atom import create_world_atom 
  62  from _emerge.Dependency import Dependency 
  63  from _emerge.DependencyArg import DependencyArg 
  64  from _emerge.DepPriority import DepPriority 
  65  from _emerge.DepPriorityNormalRange import DepPriorityNormalRange 
  66  from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange 
  67  from _emerge.EbuildMetadataPhase import EbuildMetadataPhase 
  68  from _emerge.FakeVartree import FakeVartree 
  69  from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps 
  70  from _emerge.is_valid_package_atom import insert_category_into_atom, \ 
  71          is_valid_package_atom 
  72  from _emerge.Package import Package 
  73  from _emerge.PackageArg import PackageArg 
  74  from _emerge.PackageVirtualDbapi import PackageVirtualDbapi 
  75  from _emerge.RootConfig import RootConfig 
  76  from _emerge.search import search 
  77  from _emerge.SetArg import SetArg 
  78  from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice 
  79  from _emerge.UnmergeDepPriority import UnmergeDepPriority 
  80  from _emerge.UseFlagDisplay import pkg_use_display 
  81  from _emerge.UserQuery import UserQuery 
  82   
  83  from _emerge.resolver.backtracking import Backtracker, BacktrackParameter 
  84  from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex 
  85  from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper 
  86  from _emerge.resolver.slot_collision import slot_conflict_handler 
  87  from _emerge.resolver.circular_dependency import circular_dependency_handler 
  88  from _emerge.resolver.output import Display, format_unmatched_atom 
  89   
# Python 2/3 compatibility aliases. On Python 3 (hexversion >= 0x3000000)
# the py2-only names basestring/long are rebound to their py3 equivalents
# so the rest of the module can use them unconditionally; _unicode is the
# native text type on either major version.
if sys.hexversion >= 0x3000000:
	basestring = str
	long = int
	_unicode = str
else:
	# Python 2: unicode is the builtin text type.
	_unicode = unicode
97 -class _scheduler_graph_config(object):
98 - def __init__(self, trees, pkg_cache, graph, mergelist):
99 self.trees = trees 100 self.pkg_cache = pkg_cache 101 self.graph = graph 102 self.mergelist = mergelist
103
def _wildcard_set(atoms):
	"""
	Build an InternalPackageSet (with wildcard support) from an iterable
	of atom strings. Tokens that are not valid atoms on their own are
	retried with a "*/" category prefix, so bare package names work too.
	"""
	wildcard_pkgs = InternalPackageSet(allow_wildcard=True)
	for token in atoms:
		try:
			atom = Atom(token, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			# Assume an implicit category and retry with a wildcard one.
			atom = Atom("*/" + token, allow_wildcard=True, allow_repo=False)
		wildcard_pkgs.add(atom)
	return wildcard_pkgs
113
class _frozen_depgraph_config(object):
	"""
	Depgraph state that is computed once and then shared: backtracking
	depgraphs reuse the same instance instead of recomputing it (see
	depgraph.__init__, which accepts an existing frozen_config).
	"""

	def __init__(self, settings, trees, myopts, params, spinner):
		"""
		@param settings: portage config for the target root (EROOT etc.)
		@param trees: mapping of root -> {"porttree"/"bintree"/"vartree"/
			"root_config"} as created by portage; kept in _trees_orig
		@param myopts: parsed emerge command-line options
		@param params: resolver parameters (e.g. "deep", "remove")
		@param spinner: progress spinner object, or a false value
		"""
		self.settings = settings
		self.target_root = settings["EROOT"]
		self.myopts = myopts
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self.requested_depth = params.get("deep", 0)
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# All Package instances
		self._pkg_cache = {}
		self._highest_license_masked = {}
		# We can't know that an soname dep is unsatisfied if there are
		# any unbuilt ebuilds in the graph, since unbuilt ebuilds have
		# no soname data. Therefore, only enable soname dependency
		# resolution if --usepkgonly is enabled, or for removal actions.
		self.soname_deps_enabled = (
			("--usepkgonly" in myopts or "remove" in params) and
			params.get("ignore_soname_deps") != "y")
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			# The FakeVartree models the vdb state so the resolver can
			# work without mutating the real vartree.
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot],
					dynamic_deps=dynamic_deps,
					ignore_built_slot_operator_deps=ignore_built_slot_operator_deps,
					soname_deps=self.soname_deps_enabled)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			if self.soname_deps_enabled and "remove" not in params:
				# Wrap the bintree dbapi so soname provides can be
				# queried from the package index.
				self.trees[myroot]["bintree"] = DummyTree(
					DbapiProvidesIndex(trees[myroot]["bintree"].dbapi))

		self._required_set_names = set(["world"])

		# Each of these options takes a list of whitespace-separated
		# atom strings; they are joined and re-split so that both
		# multiple occurrences and space-separated values work.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
184
class _depgraph_sets(object):
	"""Per-root bookkeeping for package sets and argument atoms."""

	def __init__(self):
		# All atoms from every set added to the graph, including atoms
		# given directly as arguments.
		self.atoms = InternalPackageSet(allow_repo=True)
		# Mapping used by depgraph to associate atoms with their
		# originating arguments (populated externally).
		self.atom_arg_map = {}
		# Every set added to the graph, keyed by set name. The special
		# '__non_set_args__' entry collects non-set atoms that were
		# given as command-line arguments.
		self.sets = {
			'__non_set_args__': InternalPackageSet(allow_repo=True),
		}
195
class _rebuild_config(object):
	"""
	Tracks build-time dependency edges so that --rebuild-if-new-rev,
	--rebuild-if-new-ver and --rebuild-if-unbuilt can force rebuilds or
	reinstalls of reverse build-time dependencies.
	"""

	def __init__(self, frozen_config, backtrack_parameters):
		# Digraph of dep_pkg -> parent build-time edges (see add()).
		self._graph = digraph()
		self._frozen_config = frozen_config
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any of the --rebuild-if-* options is active.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)

	def add(self, dep_pkg, dep):
		"""
		Record a build-time dependency edge from dep_pkg to the (built)
		parent package of dep, unless either end matches the user's
		--rebuild-exclude / --rebuild-ignore atoms.
		"""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and priority.buildtime and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)

	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
			return False

		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
			return True

		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
				return False

		return True

	def _trigger_rebuild(self, parent, build_deps):
		"""
		Decide whether parent must be rebuilt or reinstalled given its
		build-time children (build_deps maps slot_atom -> dep_pkg).
		Returns True when a rebuild/reinstall was scheduled.
		"""
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
			# Already scheduled for rebuild.
			return False
		trees = self._frozen_config.trees
		reinstall = False
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
				return True
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):

				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				#    consistent.
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Any same-version (revision ignored) binary of the
					# dep on the parent's binhost also counts as present.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
							if uri == dep_uri:
								break
				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					# built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
					return True
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					try:
						bin_build_time, = bindb.aux_get(parent.cpv,
							["BUILD_TIME"])
					except KeyError:
						continue
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local package
						# is not up to date. Force reinstall.
						reinstall = True
		if reinstall:
			self.reinstall_list.add(root_slot)
		return reinstall

	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.
		"""
		need_restart = False
		graph = self._graph
		build_deps = {}

		leaf_nodes = deque(graph.leaf_nodes())

		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
		while graph:
			if not leaf_nodes:
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])

			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
				continue
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			graph.remove(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				if parent == node:
					# Ignore a direct cycle.
					continue
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
				need_restart = True

		return need_restart
356 357
class _dynamic_depgraph_config(object):
	"""
	The mutable, per-resolution state of a depgraph instance. Unlike
	_frozen_depgraph_config, a fresh instance is built for every
	depgraph (including each backtracking run), seeded from the given
	backtrack_parameters.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		# Contains the args created by select_files.
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		self.sets = {}
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		# Do not initialize this until the depgraph _validate_blockers
		# method is called, so that the _in_blocker_conflict method can
		# assert that _validate_blockers has been called first.
		self._blocked_pkgs = None
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This is used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		self._parent_atoms = {}
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}
		self._highest_pkg_cache_cp_map = {}
		self._flatten_atoms_cache = {}

		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}

		# State carried over from previous backtracking runs.
		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
		self._prune_rebuilds = backtrack_parameters.prune_rebuilds
		self._need_restart = False
		self._need_config_reload = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._buildpkgonly_deps_unsatisfied = False
		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._displayed_autounmask = False
		self._success_without_autounmask = False
		self._autounmask_backtrack_disabled = False
		self._required_use_unsatisfied = False
		self._traverse_ignored_deps = False
		self._complete_mode = False
		self._slot_operator_deps = {}
		self._installed_sonames = collections.defaultdict(list)
		self._package_tracker = PackageTracker(
			soname_deps=depgraph._frozen_config.soname_deps_enabled)
		# Track missed updates caused by solved conflicts.
		self._conflict_missed_update = collections.defaultdict(dict)

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)

			# graph_tree/filtered_tree are minimal stand-in "tree"
			# objects; only the attributes assigned below are used.
			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# that have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph
			self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
			self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
			def filtered_tree():
				pass
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]
			self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
			self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

			dbs = []
			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
			else:
				if "--usepkgonly" not in depgraph._frozen_config.myopts:
					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
					db_keys = list(portdb._aux_cache_keys)
					dbs.append((portdb, "ebuild", False, False, db_keys))

				if "--usepkg" in depgraph._frozen_config.myopts:
					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
					db_keys = list(bindb._aux_cache_keys)
					dbs.append((bindb, "binary", True, False, db_keys))

			# The installed-package db is always consulted last.
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
	"""
	The dependency resolver: builds and validates the dependency graph
	for a requested set of packages, backed by a shared
	_frozen_depgraph_config and a per-run _dynamic_depgraph_config.
	"""

	# Represents the depth of a node that is unreachable from explicit
	# user arguments (or their deep dependencies). Such nodes are pulled
	# in by the _complete_graph method.
	_UNREACHABLE_DEPTH = object()

	# Maps package type names to tree names (shared with RootConfig).
	pkg_tree_map = RootConfig.pkg_tree_map
def __init__(self, settings, trees, myopts, myparams, spinner,
	frozen_config=None, backtrack_parameters=None, allow_backtracking=False):
	"""
	@param settings: portage config for the target root
	@param trees: portage trees mapping (root -> tree dict)
	@param myopts: parsed emerge command-line options
	@param myparams: resolver parameters
	@param spinner: progress spinner, or a false value
	@param frozen_config: an existing _frozen_depgraph_config to share
		(used by backtracking runs); built fresh when None
	@param backtrack_parameters: state from a previous backtracking
		run; a pristine BacktrackParameter when None
	@param allow_backtracking: whether backtracking may be attempted
	"""
	# NOTE: the default used to be backtrack_parameters=BacktrackParameter(),
	# a mutable default evaluated once and shared by every depgraph
	# instance; use a None sentinel so each call gets a fresh instance.
	if backtrack_parameters is None:
		backtrack_parameters = BacktrackParameter()
	if frozen_config is None:
		frozen_config = _frozen_depgraph_config(settings, trees,
			myopts, myparams, spinner)
	self._frozen_config = frozen_config
	self._dynamic_config = _dynamic_depgraph_config(self, myparams,
		allow_backtracking, backtrack_parameters)
	self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)

	self._select_atoms = self._select_atoms_highest_available
	self._select_package = self._select_pkg_highest_available

	# Internal callers share the global event loop; external API use
	# gets a private one. (Conditional expression instead of the
	# fragile "and/or" idiom.)
	self._event_loop = (global_event_loop()
		if portage._internal_caller else EventLoop(main=False))

	self._select_atoms_parent = None

	self.query = UserQuery(myopts).query
562
563 - def _index_binpkgs(self):
564 for root in self._frozen_config.trees: 565 bindb = self._frozen_config.trees[root]["bintree"].dbapi 566 if bindb._provides_index: 567 # don't repeat this when backtracking 568 continue 569 root_config = self._frozen_config.roots[root] 570 for cpv in self._frozen_config._trees_orig[ 571 root]["bintree"].dbapi.cpv_all(): 572 bindb._provides_inject( 573 self._pkg(cpv, "binary", root_config))
574
def _load_vdb(self):
	"""
	Load installed package metadata if appropriate. This used to be called
	from the constructor, but that wasn't very nice since this procedure
	is slow and it generates spinner output. So, now it's called on-demand
	by various methods when necessary.
	"""

	# Idempotent: only the first call per dynamic config does work.
	if self._dynamic_config._vdb_loaded:
		return

	for myroot in self._frozen_config.trees:

		dynamic_deps = self._dynamic_config.myparams.get(
			"dynamic_deps", "y") != "n"
		preload_installed_pkgs = \
			"--nodeps" not in self._frozen_config.myopts

		fake_vartree = self._frozen_config.trees[myroot]["vartree"]
		if not fake_vartree.dbapi:
			# This needs to be called for the first depgraph, but not for
			# backtracking depgraphs that share the same frozen_config.
			fake_vartree.sync()

			# FakeVartree.sync() populates virtuals, and we want
			# self.pkgsettings to have them populated too.
			self._frozen_config.pkgsettings[myroot] = \
				portage.config(clone=fake_vartree.settings)

		if preload_installed_pkgs:
			vardb = fake_vartree.dbapi

			if not dynamic_deps:
				# Register installed packages (and their sonames)
				# directly; no metadata regeneration needed.
				for pkg in vardb:
					self._dynamic_config._package_tracker.add_installed_pkg(pkg)
					self._add_installed_sonames(pkg)
			else:
				# Dynamic deps: regenerate metadata concurrently via
				# the task scheduler (jobs/load limits from myopts).
				max_jobs = self._frozen_config.myopts.get("--jobs")
				max_load = self._frozen_config.myopts.get("--load-average")
				scheduler = TaskScheduler(
					self._dynamic_deps_preload(fake_vartree),
					max_jobs=max_jobs,
					max_load=max_load,
					event_loop=fake_vartree._portdb._event_loop)
				scheduler.start()
				scheduler.wait()

	self._dynamic_config._vdb_loaded = True
623
def _dynamic_deps_preload(self, fake_vartree):
	"""
	Generator used by _load_vdb's TaskScheduler: registers every
	installed package with the package tracker and yields an
	EbuildMetadataPhase task for each package whose metadata cannot be
	satisfied from the valid ebuild cache. Packages with no matching
	ebuild, or with valid cached metadata, are preloaded immediately
	and yield nothing.
	"""
	portdb = fake_vartree._portdb
	for pkg in fake_vartree.dbapi:
		self._spinner_update()
		self._dynamic_config._package_tracker.add_installed_pkg(pkg)
		self._add_installed_sonames(pkg)
		ebuild_path, repo_path = \
			portdb.findname2(pkg.cpv, myrepo=pkg.repo)
		if ebuild_path is None:
			# No corresponding ebuild; fall back to installed metadata.
			fake_vartree.dynamic_deps_preload(pkg, None)
			continue
		metadata, ebuild_hash = portdb._pull_valid_cache(
			pkg.cpv, ebuild_path, repo_path)
		if metadata is not None:
			fake_vartree.dynamic_deps_preload(pkg, metadata)
		else:
			# Cache miss: schedule metadata regeneration; the exit
			# listener feeds the result back into fake_vartree.
			proc = EbuildMetadataPhase(cpv=pkg.cpv,
				ebuild_hash=ebuild_hash,
				portdb=portdb, repo_path=repo_path,
				settings=portdb.doebuild_settings)
			proc.addExitListener(
				self._dynamic_deps_proc_exit(pkg, fake_vartree))
			yield proc
647
648 - class _dynamic_deps_proc_exit(object):
649 650 __slots__ = ('_pkg', '_fake_vartree') 651
652 - def __init__(self, pkg, fake_vartree):
653 self._pkg = pkg 654 self._fake_vartree = fake_vartree
655
656 - def __call__(self, proc):
657 metadata = None 658 if proc.returncode == os.EX_OK: 659 metadata = proc.metadata 660 self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
661
662 - def _spinner_update(self):
663 if self._frozen_config.spinner: 664 self._frozen_config.spinner.update()
665
def _compute_abi_rebuild_info(self):
	"""
	Fill self._forced_rebuilds with packages that cause rebuilds.

	The result maps root -> {child package -> set of parent packages
	whose rebuild the child forced}, derived from forced-reinstall
	arguments, soname providers, and built slot-operator deps.
	"""

	debug = "--debug" in self._frozen_config.myopts
	installed_sonames = self._dynamic_config._installed_sonames
	package_tracker = self._dynamic_config._package_tracker

	# Get all atoms that might have caused a forced rebuild.
	atoms = {}
	for s in self._dynamic_config._initial_arg_list:
		if s.force_reinstall:
			root = s.root_config.root
			atoms.setdefault(root, set()).update(s.pset)

	if debug:
		writemsg_level("forced reinstall atoms:\n",
			level=logging.DEBUG, noiselevel=-1)

		for root in atoms:
			writemsg_level(" root: %s\n" % root,
				level=logging.DEBUG, noiselevel=-1)
			for atom in atoms[root]:
				writemsg_level(" atom: %s\n" % atom,
					level=logging.DEBUG, noiselevel=-1)
		writemsg_level("\n\n",
			level=logging.DEBUG, noiselevel=-1)

	# Go through all slot operator deps and check if one of these deps
	# has a parent that is matched by one of the atoms from above.
	forced_rebuilds = {}

	for root, rebuild_atoms in atoms.items():

		for slot_atom in rebuild_atoms:

			inst_pkg, reinst_pkg = \
				self._select_pkg_from_installed(root, slot_atom)

			if inst_pkg is reinst_pkg or reinst_pkg is None:
				# Nothing is actually being replaced for this atom.
				continue

			if (inst_pkg is not None and
				inst_pkg.requires is not None):
				# Soname-based rebuild detection: any required soname
				# whose original provider is gone from the final graph
				# means the replacement provider forced this rebuild.
				for atom in inst_pkg.requires:
					initial_providers = installed_sonames.get(
						(root, atom))
					if initial_providers is None:
						continue
					final_provider = next(
						package_tracker.match(root, atom),
						None)
					if final_provider:
						continue
					for provider in initial_providers:
						# Find the replacement child.
						child = next((pkg for pkg in
							package_tracker.match(
							root, provider.slot_atom)
							if not pkg.installed), None)

						if child is None:
							continue

						forced_rebuilds.setdefault(
							root, {}).setdefault(
							child, set()).add(inst_pkg)

			# Generate pseudo-deps for any slot-operator deps of
			# inst_pkg. Its deps aren't in _slot_operator_deps
			# because it hasn't been added to the graph, but we
			# are interested in any rebuilds that it triggered.
			built_slot_op_atoms = []
			if inst_pkg is not None:
				selected_atoms = self._select_atoms_probe(
					inst_pkg.root, inst_pkg)
				for atom in selected_atoms:
					if atom.slot_operator_built:
						built_slot_op_atoms.append(atom)

				if not built_slot_op_atoms:
					continue

			# Use a cloned list, since we may append to it below.
			deps = self._dynamic_config._slot_operator_deps.get(
				(root, slot_atom), [])[:]

			if built_slot_op_atoms and reinst_pkg is not None:
				for child in self._dynamic_config.digraph.child_nodes(
					reinst_pkg):

					if child.installed:
						continue

					for atom in built_slot_op_atoms:
						# NOTE: Since atom comes from inst_pkg, and
						# reinst_pkg is the replacement parent, there's
						# no guarantee that atom will completely match
						# child. So, simply use atom.cp and atom.slot
						# for matching.
						if atom.cp != child.cp:
							continue
						if atom.slot and atom.slot != child.slot:
							continue
						deps.append(Dependency(atom=atom, child=child,
							root=child.root, parent=reinst_pkg))

			for dep in deps:
				if dep.child.installed:
					# Find the replacement child.
					child = next((pkg for pkg in
						self._dynamic_config._package_tracker.match(
						dep.root, dep.child.slot_atom)
						if not pkg.installed), None)

					if child is None:
						continue

					inst_child = dep.child

				else:
					child = dep.child
					inst_child = self._select_pkg_from_installed(
						child.root, child.slot_atom)[0]

				# Make sure the child's slot/subslot has changed. If it
				# hasn't, then another child has forced this rebuild.
				if inst_child and inst_child.slot == child.slot and \
					inst_child.sub_slot == child.sub_slot:
					continue

				if dep.parent.installed:
					# Find the replacement parent.
					parent = next((pkg for pkg in
						self._dynamic_config._package_tracker.match(
						dep.parent.root, dep.parent.slot_atom)
						if not pkg.installed), None)

					if parent is None:
						continue

				else:
					parent = dep.parent

				# The child has forced a rebuild of the parent
				forced_rebuilds.setdefault(root, {}
					).setdefault(child, set()).add(parent)

	if debug:
		writemsg_level("slot operator dependencies:\n",
			level=logging.DEBUG, noiselevel=-1)

		for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
			writemsg_level(" (%s, %s)\n" % \
				(root, slot_atom), level=logging.DEBUG, noiselevel=-1)
			for dep in deps:
				writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
				writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)

		writemsg_level("\n\n",
			level=logging.DEBUG, noiselevel=-1)


		writemsg_level("forced rebuilds:\n",
			level=logging.DEBUG, noiselevel=-1)

		for root in forced_rebuilds:
			writemsg_level(" root: %s\n" % root,
				level=logging.DEBUG, noiselevel=-1)
			for child in forced_rebuilds[root]:
				writemsg_level(" child: %s\n" % child,
					level=logging.DEBUG, noiselevel=-1)
				for parent in forced_rebuilds[root][child]:
					writemsg_level(" parent: %s\n" % parent,
						level=logging.DEBUG, noiselevel=-1)
		writemsg_level("\n\n",
			level=logging.DEBUG, noiselevel=-1)

	self._forced_rebuilds = forced_rebuilds
846
def _show_abi_rebuild_info(self):
    """
    Print the forced-rebuild summary previously collected into
    self._forced_rebuilds (root -> trigger package -> set of
    packages rebuilt because of it). No output if nothing was
    forced.
    """
    forced = self._forced_rebuilds
    if not forced:
        return

    writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)

    for child_map in forced.values():
        for trigger, dependents in child_map.items():
            writemsg_stdout("  %s causes rebuilds for:\n" % (trigger,), noiselevel=-1)
            for dependent in dependents:
                writemsg_stdout("    %s\n" % (dependent,), noiselevel=-1)
def _eliminate_ignored_binaries(self):
    """
    Eliminate any package from self._dynamic_config.ignored_binaries
    for which a more optimal alternative exists.
    """
    # Iterate over a list copy, since entries are popped from the
    # underlying dict during iteration.
    for pkg in list(self._dynamic_config.ignored_binaries):

        for selected_pkg in self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom):

            # A strictly better package fills this slot, so the
            # ignored binary is no longer worth warning about.
            if selected_pkg > pkg:
                self._dynamic_config.ignored_binaries.pop(pkg)
                break

            # NOTE: The Package.__ge__ implementation accounts for
            # differences in build_time, so the warning about "ignored"
            # packages will be triggered if both packages are the same
            # version and selected_pkg is not the most recent build.
            if (selected_pkg.type_name == "binary" and
                selected_pkg >= pkg):
                self._dynamic_config.ignored_binaries.pop(pkg)
                break

            if selected_pkg.installed and \
                selected_pkg.cpv == pkg.cpv and \
                selected_pkg.build_time == pkg.build_time:
                # We don't care about ignored binaries when an
                # identical installed instance is selected to
                # fill the slot.
                self._dynamic_config.ignored_binaries.pop(pkg)
                break
def _ignored_binaries_autounmask_backtrack(self):
    """
    Check if there are ignored binaries that would have been
    accepted with the current autounmask USE changes.

    @rtype: bool
    @return: True if there are unnecessary rebuilds that
        can be avoided by backtracking
    """
    # All three conditions must hold for backtracking to help:
    # backtracking allowed, autounmask USE changes exist, and some
    # binaries were ignored.
    if not all([
        self._dynamic_config._allow_backtracking,
        self._dynamic_config._needed_use_config_changes,
        self._dynamic_config.ignored_binaries]):
        return False

    self._eliminate_ignored_binaries()

    # _eliminate_ignored_binaries may have eliminated
    # all of the ignored binaries
    if not self._dynamic_config.ignored_binaries:
        return False

    # Index the pending USE changes by root and slot, keeping only
    # packages that actually made it into the dependency graph.
    use_changes = collections.defaultdict(
        functools.partial(collections.defaultdict, dict))
    for pkg, (new_use, changes) in self._dynamic_config._needed_use_config_changes.items():
        if pkg in self._dynamic_config.digraph:
            use_changes[pkg.root][pkg.slot_atom] = (pkg, new_use)

    for pkg in self._dynamic_config.ignored_binaries:
        selected_pkg, new_use = use_changes[pkg.root].get(
            pkg.slot_atom, (None, None))
        if new_use is None:
            continue

        # The autounmasked USE must match the binary's USE exactly,
        # otherwise the binary still wouldn't be accepted.
        if new_use != pkg.use.enabled:
            continue

        # Don't backtrack if a better package was selected anyway.
        if selected_pkg > pkg:
            continue

        return True

    return False
def _show_ignored_binaries(self):
    """
    Show binaries that have been ignored because their USE didn't
    match the user's config.
    """
    if not self._dynamic_config.ignored_binaries \
        or '--quiet' in self._frozen_config.myopts:
        return

    self._eliminate_ignored_binaries()

    # Regroup by ignore reason ("respect_use" / "changed_deps"),
    # mapping each reason to {pkg: info}.
    ignored_binaries = {}

    for pkg in self._dynamic_config.ignored_binaries:
        for reason, info in self._dynamic_config.\
            ignored_binaries[pkg].items():
            ignored_binaries.setdefault(reason, {})[pkg] = info

    # If the user explicitly chose a behavior ("y" or "n") then the
    # corresponding advisory warning is pointless, so drop it.
    if self._dynamic_config.myparams.get(
        "binpkg_respect_use") in ("y", "n"):
        ignored_binaries.pop("respect_use", None)

    if self._dynamic_config.myparams.get(
        "binpkg_changed_deps") in ("y", "n"):
        ignored_binaries.pop("changed_deps", None)

    if not ignored_binaries:
        return

    self._show_merge_list()

    if ignored_binaries.get("respect_use"):
        self._show_ignored_binaries_respect_use(
            ignored_binaries["respect_use"])

    if ignored_binaries.get("changed_deps"):
        self._show_ignored_binaries_changed_deps(
            ignored_binaries["changed_deps"])
def _show_ignored_binaries_respect_use(self, respect_use):
    """
    Warn about binary packages skipped because their USE settings
    do not match the current configuration, printing package.use
    lines that the user can paste to accept them.
    """
    writemsg("\n!!! The following binary packages have been ignored " + \
        "due to non matching USE:\n\n", noiselevel=-1)

    for pkg, flags in respect_use.items():
        # Prefix disabled flags with "-" so the emitted line is
        # valid package.use syntax.
        adjusted = [flag if flag in pkg.use.enabled else "-" + flag
            for flag in sorted(flags)]
        # The user can paste this line into package.use
        writemsg("    =%s %s" % (pkg.cpv, " ".join(adjusted)), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n", noiselevel=-1)

    note_lines = (
        "",
        "NOTE: The --binpkg-respect-use=n option will prevent emerge",
        "      from ignoring these binary packages if possible.",
        "      Using --binpkg-respect-use=y will silence this warning.",
    )
    for note in note_lines:
        writemsg((colorize("INFORM", note) if note else note) + "\n",
            noiselevel=-1)
def _show_ignored_binaries_changed_deps(self, changed_deps):
    """
    Warn about binary packages skipped because their stored
    dependencies differ from those of the corresponding ebuild.
    """
    writemsg("\n!!! The following binary packages have been "
        "ignored due to changed dependencies:\n\n",
        noiselevel=-1)

    for pkg in changed_deps:
        line = "     %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
        if pkg.root_config.settings["ROOT"] != "/":
            line = "%s for %s" % (line, pkg.root)
        writemsg("%s\n" % line, noiselevel=-1)

    note_lines = (
        "",
        "NOTE: The --binpkg-changed-deps=n option will prevent emerge",
        "      from ignoring these binary packages if possible.",
        "      Using --binpkg-changed-deps=y will silence this warning.",
    )
    for note in note_lines:
        writemsg((colorize("INFORM", note) if note else note) + "\n",
            noiselevel=-1)
def _get_missed_updates(self):
    """
    Collect updates that were masked at runtime or lost to a
    conflict.

    @rtype: dict
    @return: mapping of (root, slot_atom) ->
        (pkg, mask_type, parent_atoms)
    """
    # In order to minimize noise, show only the highest
    # missed update from each SLOT.
    missed_updates = {}
    for pkg, mask_reasons in \
        chain(self._dynamic_config._runtime_pkg_mask.items(),
        self._dynamic_config._conflict_missed_update.items()):
        if pkg.installed:
            # Exclude installed here since we only
            # want to show available updates.
            continue
        missed_update = True
        any_selected = False
        for chosen_pkg in self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom):
            any_selected = True
            # Not a missed update if something at least as good
            # (or the same non-installed version) fills the slot.
            if chosen_pkg > pkg or (not chosen_pkg.installed and \
                chosen_pkg.version == pkg.version):
                missed_update = False
                break
        if any_selected and missed_update:
            k = (pkg.root, pkg.slot_atom)
            if k in missed_updates:
                other_pkg, mask_type, parent_atoms = missed_updates[k]
                # Keep only the highest missed update per slot.
                if other_pkg > pkg:
                    continue
            # Record the first mask reason that has parent atoms.
            for mask_type, parent_atoms in mask_reasons.items():
                if not parent_atoms:
                    continue
                missed_updates[k] = (pkg, mask_type, parent_atoms)
                break

    return missed_updates
def _show_missed_update(self):
    """
    Report missed updates, grouped by the reason they were masked
    (slot conflict or missing dependency).
    """
    missed_updates = self._get_missed_updates()
    if not missed_updates:
        return

    # Group (pkg, parent_atoms) pairs by mask reason.
    by_reason = {}
    for pkg, mask_type, parent_atoms in missed_updates.values():
        by_reason.setdefault(mask_type, []).append((pkg, parent_atoms))

    myopts = self._frozen_config.myopts
    if '--quiet' in myopts and '--debug' not in myopts:
        # These verbose reports are suppressed in quiet mode.
        by_reason.pop("slot conflict", None)
        by_reason.pop("missing dependency", None)

    self._show_missed_update_slot_conflicts(
        by_reason.get("slot conflict"))

    self._show_missed_update_unsatisfied_dep(
        by_reason.get("missing dependency"))
def _show_missed_update_unsatisfied_dep(self, missed_updates):
    """
    Display updates skipped due to unsatisfied dependencies.

    @param missed_updates: list of (pkg, parent_atoms) pairs, or None
    """
    if not missed_updates:
        return

    self._show_merge_list()
    backtrack_masked = []

    for pkg, parent_atoms in missed_updates:

        try:
            # First probe with check_backtrack=True; it raises
            # _backtrack_mask when the dep is only unsatisfied due
            # to backtracking masks.
            for parent, root, atom in parent_atoms:
                self._show_unsatisfied_dep(root, atom, myparent=parent,
                    check_backtrack=True)
        except self._backtrack_mask:
            # This is displayed below in abbreviated form.
            backtrack_masked.append((pkg, parent_atoms))
            continue

        writemsg("\n!!! The following update has been skipped " + \
            "due to unsatisfied dependencies:\n\n", noiselevel=-1)

        writemsg(str(pkg.slot_atom), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n\n", noiselevel=-1)

        selected_pkg = next(self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom), None)

        writemsg("  selected: %s\n" % (selected_pkg,), noiselevel=-1)
        writemsg("  skipped: %s (see unsatisfied dependency below)\n"
            % (pkg,), noiselevel=-1)

        # Now show the full unsatisfied-dep details.
        for parent, root, atom in parent_atoms:
            self._show_unsatisfied_dep(root, atom, myparent=parent)
            writemsg("\n", noiselevel=-1)

    if backtrack_masked:
        # These are shown in abbreviated form, in order to avoid terminal
        # flooding from mask messages as reported in bug #285832.
        writemsg("\n!!! The following update(s) have been skipped " + \
            "due to unsatisfied dependencies\n" + \
            "!!! triggered by backtracking:\n\n", noiselevel=-1)
        for pkg, parent_atoms in backtrack_masked:
            writemsg(str(pkg.slot_atom), noiselevel=-1)
            if pkg.root_config.settings["ROOT"] != "/":
                writemsg(" for %s" % (pkg.root,), noiselevel=-1)
            writemsg("\n", noiselevel=-1)
def _show_missed_update_slot_conflicts(self, missed_updates):
    """
    Display updates skipped due to slot conflicts, including the
    specific parent atoms that failed to match.

    @param missed_updates: list of (pkg, parent_atoms) pairs, or None
    """
    if not missed_updates:
        return

    self._show_merge_list()
    msg = []
    msg.append("\nWARNING: One or more updates/rebuilds have been " + \
        "skipped due to a dependency conflict:\n\n")

    indent = "  "
    for pkg, parent_atoms in missed_updates:
        msg.append(str(pkg.slot_atom))
        if pkg.root_config.settings["ROOT"] != "/":
            msg.append(" for %s" % (pkg.root,))
        msg.append("\n\n")

        msg.append(indent)
        msg.append(str(pkg))
        msg.append(" conflicts with\n")

        for parent, atom in parent_atoms:
            if isinstance(parent,
                (PackageArg, AtomArg)):
                # For PackageArg and AtomArg types, it's
                # redundant to display the atom attribute.
                msg.append(2*indent)
                msg.append(str(parent))
                msg.append("\n")
            else:
                # Display the specific atom from SetArg or
                # Package types.  The marker line points at the
                # non-matching part of the atom.
                atom, marker = format_unmatched_atom(
                    pkg, atom, self._pkg_use_enabled)

                msg.append(2*indent)
                msg.append("%s required by %s\n" % (atom, parent))
                msg.append(2*indent)
                msg.append(marker)
                msg.append("\n")
        msg.append("\n")

    writemsg("".join(msg), noiselevel=-1)
def _show_slot_collision_notice(self):
    """Show an informational message advising the user to mask one of the
    packages. In some cases it may be possible to resolve this
    automatically, but support for backtracking (removal nodes that have
    already been selected) will be required in order to handle all possible
    cases.
    """

    if not any(self._dynamic_config._package_tracker.slot_conflicts()):
        return

    self._show_merge_list()

    # Lazily create and cache the conflict handler.
    if self._dynamic_config._slot_conflict_handler is None:
        self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
    handler = self._dynamic_config._slot_conflict_handler

    conflict = handler.get_conflict()
    writemsg(conflict, noiselevel=-1)

    # If the handler can explain the conflict, that explanation
    # replaces the generic advice below.
    explanation = handler.get_explanation()
    if explanation:
        writemsg(explanation, noiselevel=-1)
        return

    if "--quiet" in self._frozen_config.myopts:
        return

    msg = []
    msg.append("It may be possible to solve this problem ")
    msg.append("by using package.mask to prevent one of ")
    msg.append("those packages from being selected. ")
    msg.append("However, it is also possible that conflicting ")
    msg.append("dependencies exist such that they are impossible to ")
    msg.append("satisfy simultaneously.  If such a conflict exists in ")
    msg.append("the dependencies of two different packages, then those ")
    msg.append("packages can not be installed simultaneously.")
    backtrack_opt = self._frozen_config.myopts.get('--backtrack')
    # Suggest a larger --backtrack only when backtracking is
    # currently disabled and the configured value is small.
    if not self._dynamic_config._allow_backtracking and \
        (backtrack_opt is None or \
        (backtrack_opt > 0 and backtrack_opt < 30)):
        msg.append(" You may want to try a larger value of the ")
        msg.append("--backtrack option, such as --backtrack=30, ")
        msg.append("in order to see if that will solve this conflict ")
        msg.append("automatically.")

    for line in textwrap.wrap(''.join(msg), 70):
        writemsg(line + '\n', noiselevel=-1)
    writemsg('\n', noiselevel=-1)

    msg = []
    msg.append("For more information, see MASKED PACKAGES ")
    msg.append("section in the emerge man page or refer ")
    msg.append("to the Gentoo Handbook.")
    for line in textwrap.wrap(''.join(msg), 70):
        writemsg(line + '\n', noiselevel=-1)
    writemsg('\n', noiselevel=-1)
def _solve_non_slot_operator_slot_conflicts(self):
    """
    This function solves slot conflicts which can
    be solved by simply choosing one of the conflicting
    packages and removing all the other ones.
    It is able to solve somewhat more complex cases where
    conflicts can only be solved simultaneously.
    """
    debug = "--debug" in self._frozen_config.myopts

    # List all conflicts. Ignore those that involve slot operator rebuilds
    # as the logic there needs special slot conflict behavior which isn't
    # provided by this function.
    conflicts = []
    for conflict in self._dynamic_config._package_tracker.slot_conflicts():
        slot_key = conflict.root, conflict.atom
        if slot_key not in self._dynamic_config._slot_operator_replace_installed:
            conflicts.append(conflict)

    if not conflicts:
        return

    if debug:
        writemsg_level(
            "\n!!! Slot conflict handler started.\n",
            level=logging.DEBUG, noiselevel=-1)

    # Get a set of all conflicting packages.
    conflict_pkgs = set()
    for conflict in conflicts:
        conflict_pkgs.update(conflict)

    # Get the list of other packages which are only
    # required by conflict packages.
    indirect_conflict_candidates = set()
    for pkg in conflict_pkgs:
        indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
    indirect_conflict_candidates.difference_update(conflict_pkgs)

    # Fixed-point iteration: a package is an indirect conflict
    # package when every parent is a (direct or indirect) conflict
    # package.
    indirect_conflict_pkgs = set()
    while indirect_conflict_candidates:
        pkg = indirect_conflict_candidates.pop()

        only_conflict_parents = True
        for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
            if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
                only_conflict_parents = False
                break
        if not only_conflict_parents:
            continue

        indirect_conflict_pkgs.add(pkg)
        for child in self._dynamic_config.digraph.child_nodes(pkg):
            if child in conflict_pkgs or child in indirect_conflict_pkgs:
                continue
            indirect_conflict_candidates.add(child)

    # Create a graph containing the conflict packages
    # and a special 'non_conflict_node' that represents
    # all non-conflict packages.
    conflict_graph = digraph()

    non_conflict_node = "(non-conflict package)"
    conflict_graph.add(non_conflict_node, None)

    for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
        conflict_graph.add(pkg, None)

    # Add parent->child edges for each conflict package.
    # Parents, which aren't conflict packages are represented
    # by 'non_conflict_node'.
    # If several conflicting packages are matched, but not all,
    # add a tuple with the matched packages to the graph.
    class or_tuple(tuple):
        """
        Helper class for debug printing.
        """
        def __str__(self):
            return "(%s)" % ",".join(str(pkg) for pkg in self)

    non_matching_forced = set()
    for conflict in conflicts:
        if debug:
            writemsg_level("   conflict:\n", level=logging.DEBUG, noiselevel=-1)
            writemsg_level("      root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1)
            writemsg_level("      atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1)
            for pkg in conflict:
                writemsg_level("      pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1)

        all_parent_atoms = set()
        highest_pkg = None
        inst_pkg = None
        for pkg in conflict:
            if pkg.installed:
                inst_pkg = pkg
            if highest_pkg is None or highest_pkg < pkg:
                highest_pkg = pkg
            all_parent_atoms.update(
                self._dynamic_config._parent_atoms.get(pkg, []))

        for parent, atom in all_parent_atoms:
            is_arg_parent = isinstance(parent, AtomArg)
            is_non_conflict_parent = parent not in conflict_pkgs and \
                parent not in indirect_conflict_pkgs

            if debug:
                writemsg_level("      parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1)
                writemsg_level("      arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent),
                    level=logging.DEBUG, noiselevel=-1)
                writemsg_level("      atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1)

            if is_non_conflict_parent:
                parent = non_conflict_node

            matched = []
            for pkg in conflict:
                if (pkg is highest_pkg and
                    not highest_pkg.installed and
                    inst_pkg is not None and
                    inst_pkg.sub_slot != highest_pkg.sub_slot and
                    not self._downgrade_probe(highest_pkg)):
                    # If an upgrade is desired, force the highest
                    # version into the graph (bug #531656).
                    non_matching_forced.add(highest_pkg)

                if atom.match(pkg.with_use(
                    self._pkg_use_enabled(pkg))) and \
                    not (is_arg_parent and pkg.installed):
                    matched.append(pkg)

            if debug:
                for match in matched:
                    writemsg_level("      match: %s\n" % match, level=logging.DEBUG, noiselevel=-1)

            if len(matched) > 1:
                # Even if all packages match, this parent must still
                # be added to the conflict_graph. Otherwise, we risk
                # removing all of these packages from the depgraph,
                # which could cause a missed update (bug #522084).
                conflict_graph.add(or_tuple(matched), parent)
            elif len(matched) == 1:
                conflict_graph.add(matched[0], parent)
            else:
                # This typically means that autounmask broke a
                # USE-dep, but it could also be due to the slot
                # not matching due to multislot (bug #220341).
                # Either way, don't try to solve this conflict.
                # Instead, force them all into the graph so that
                # they are protected from removal.
                non_matching_forced.update(conflict)
                if debug:
                    for pkg in conflict:
                        writemsg_level("      non-match: %s\n" % pkg,
                            level=logging.DEBUG, noiselevel=-1)

    for pkg in indirect_conflict_pkgs:
        for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
            if parent not in conflict_pkgs and \
                parent not in indirect_conflict_pkgs:
                parent = non_conflict_node
            conflict_graph.add(pkg, parent)

    if debug:
        writemsg_level(
            "\n!!! Slot conflict graph:\n",
            level=logging.DEBUG, noiselevel=-1)
        conflict_graph.debug_print()

    # Now select required packages. Collect them in the
    # 'forced' set.
    forced = set([non_conflict_node])
    forced.update(non_matching_forced)
    unexplored = set([non_conflict_node])
    # or_tuples get special handling. We first explore
    # all packages in the hope of having forced one of
    # the packages in the tuple. This way we don't have
    # to choose one.
    unexplored_tuples = set()
    explored_nodes = set()

    while unexplored:
        # Handle all unexplored packages.
        while unexplored:
            node = unexplored.pop()
            for child in conflict_graph.child_nodes(node):
                # Don't explore a node more than once, in order
                # to avoid infinite recursion. The forced set
                # cannot be used for this purpose, since it can
                # contain unexplored nodes from non_matching_forced.
                if child in explored_nodes:
                    continue
                explored_nodes.add(child)
                forced.add(child)
                if isinstance(child, Package):
                    unexplored.add(child)
                else:
                    unexplored_tuples.add(child)

        # Now handle unexplored or_tuples. Move on with packages
        # once we had to choose one.
        while unexplored_tuples:
            nodes = unexplored_tuples.pop()
            if any(node in forced for node in nodes):
                # At least one of the packages in the
                # tuple is already forced, which means the
                # dependency represented by this tuple
                # is satisfied.
                continue

            # We now have to choose one of packages in the tuple.
            # In theory one could solve more conflicts if we'd be
            # able to try different choices here, but that has lots
            # of other problems. For now choose the package that was
            # pulled first, as this should be the most desirable choice
            # (otherwise it wouldn't have been the first one).
            forced.add(nodes[0])
            unexplored.add(nodes[0])
            break

    # Remove 'non_conflict_node' and or_tuples from 'forced'.
    forced = set(pkg for pkg in forced if isinstance(pkg, Package))
    non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)

    if debug:
        writemsg_level(
            "\n!!! Slot conflict solution:\n",
            level=logging.DEBUG, noiselevel=-1)
        for conflict in conflicts:
            writemsg_level(
                "   Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
                level=logging.DEBUG, noiselevel=-1)
            for pkg in conflict:
                if pkg in forced:
                    writemsg_level(
                        "      keep:   %s\n" % pkg,
                        level=logging.DEBUG, noiselevel=-1)
                else:
                    writemsg_level(
                        "      remove: %s\n" % pkg,
                        level=logging.DEBUG, noiselevel=-1)

    broken_packages = set()
    for pkg in non_forced:
        for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
            if isinstance(parent, Package) and parent not in non_forced:
                # Non-forcing set args are expected to be a parent of all
                # packages in the conflict.
                broken_packages.add(parent)
        self._remove_pkg(pkg)

    # Process the dependencies of chosen conflict packages
    # again to properly account for blockers.
    broken_packages.update(forced)

    # Filter out broken packages which have been removed during
    # recursive removal in self._remove_pkg.
    # NOTE(review): the "if pkg in broken_packages" clause below is a
    # no-op (always true, since pkg is drawn from broken_packages);
    # only the _package_tracker.contains() filter has any effect.
    broken_packages = list(pkg for pkg in broken_packages if pkg in broken_packages \
        if self._dynamic_config._package_tracker.contains(pkg, installed=False))

    self._dynamic_config._dep_stack.extend(broken_packages)

    if broken_packages:
        # Process dependencies. This cannot fail because we just ensured that
        # the remaining packages satisfy all dependencies.
        self._create_graph()

    # Record missed updates.
    for conflict in conflicts:
        if not any(pkg in non_forced for pkg in conflict):
            continue
        for pkg in conflict:
            if pkg not in non_forced:
                continue

            for other in conflict:
                if other is pkg:
                    continue

                for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
                    if not atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
                        self._dynamic_config._conflict_missed_update[pkg].setdefault(
                            "slot conflict", set())
                        self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
                            (parent, atom))
def _process_slot_conflicts(self):
    """
    If there are any slot conflicts and backtracking is enabled,
    _complete_graph should complete the graph before this method
    is called, so that all relevant reverse dependencies are
    available for use in backtracking decisions.
    """

    # First remove conflict packages that can be dropped outright.
    self._solve_non_slot_operator_slot_conflicts()

    if not self._validate_blockers():
        # Blockers don't trigger the _skip_restart flag, since
        # backtracking may solve blockers when it solves slot
        # conflicts (or by blind luck).
        raise self._unknown_internal_error()

    # Both _process_slot_conflict and _slot_operator_trigger_reinstalls
    # can call _slot_operator_update_probe, which requires that
    # self._dynamic_config._blocked_pkgs has been initialized by a
    # call to the _validate_blockers method.
    for conflict in self._dynamic_config._package_tracker.slot_conflicts():
        self._process_slot_conflict(conflict)

    if self._dynamic_config._allow_backtracking:
        self._slot_operator_trigger_reinstalls()
1551 - def _process_slot_conflict(self, conflict):
1552 """ 1553 Process slot conflict data to identify specific atoms which 1554 lead to conflict. These atoms only match a subset of the 1555 packages that have been pulled into a given slot. 1556 """ 1557 root = conflict.root 1558 slot_atom = conflict.atom 1559 slot_nodes = conflict.pkgs 1560 1561 debug = "--debug" in self._frozen_config.myopts 1562 1563 slot_parent_atoms = set() 1564 for pkg in slot_nodes: 1565 parent_atoms = self._dynamic_config._parent_atoms.get(pkg) 1566 if not parent_atoms: 1567 continue 1568 slot_parent_atoms.update(parent_atoms) 1569 1570 conflict_pkgs = [] 1571 conflict_atoms = {} 1572 for pkg in slot_nodes: 1573 1574 if self._dynamic_config._allow_backtracking and \ 1575 pkg in self._dynamic_config._runtime_pkg_mask: 1576 if debug: 1577 writemsg_level( 1578 "!!! backtracking loop detected: %s %s\n" % \ 1579 (pkg, 1580 self._dynamic_config._runtime_pkg_mask[pkg]), 1581 level=logging.DEBUG, noiselevel=-1) 1582 1583 parent_atoms = self._dynamic_config._parent_atoms.get(pkg) 1584 if parent_atoms is None: 1585 parent_atoms = set() 1586 self._dynamic_config._parent_atoms[pkg] = parent_atoms 1587 1588 all_match = True 1589 for parent_atom in slot_parent_atoms: 1590 if parent_atom in parent_atoms: 1591 continue 1592 parent, atom = parent_atom 1593 if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))): 1594 parent_atoms.add(parent_atom) 1595 else: 1596 all_match = False 1597 conflict_atoms.setdefault(parent_atom, set()).add(pkg) 1598 1599 if not all_match: 1600 conflict_pkgs.append(pkg) 1601 1602 if conflict_pkgs and \ 1603 self._dynamic_config._allow_backtracking and \ 1604 not self._accept_blocker_conflicts(): 1605 remaining = [] 1606 for pkg in conflict_pkgs: 1607 if self._slot_conflict_backtrack_abi(pkg, 1608 slot_nodes, conflict_atoms): 1609 backtrack_infos = self._dynamic_config._backtrack_infos 1610 config = backtrack_infos.setdefault("config", {}) 1611 config.setdefault("slot_conflict_abi", set()).add(pkg) 1612 else: 1613 
remaining.append(pkg) 1614 if remaining: 1615 self._slot_confict_backtrack(root, slot_atom, 1616 slot_parent_atoms, remaining)
1617
def _slot_confict_backtrack(self, root, slot_atom,
    all_parents, conflict_pkgs):
    """
    Schedule backtracking for a slot conflict by recording
    (package, conflict_atoms) mask candidates in _backtrack_infos.

    NOTE: the method name preserves a historical misspelling
    ("confict"); it is referenced by this exact name elsewhere,
    so it must not be renamed.
    """
    debug = "--debug" in self._frozen_config.myopts
    existing_node = next(self._dynamic_config._package_tracker.match(
        root, slot_atom, installed=False))
    # In order to avoid a missed update, first mask lower versions
    # that conflict with higher versions (the backtracker visits
    # these in reverse order).
    conflict_pkgs.sort(reverse=True)
    backtrack_data = []
    for to_be_masked in conflict_pkgs:
        # For missed update messages, find out which
        # atoms matched to_be_selected that did not
        # match to_be_masked.
        parent_atoms = \
            self._dynamic_config._parent_atoms.get(to_be_masked, set())
        conflict_atoms = set(parent_atom for parent_atom in all_parents \
            if parent_atom not in parent_atoms)
        backtrack_data.append((to_be_masked, conflict_atoms))

    # The last entry (lowest version) is the one the backtracker
    # will mask first.
    to_be_masked = backtrack_data[-1][0]

    self._dynamic_config._backtrack_infos.setdefault(
        "slot conflict", []).append(backtrack_data)
    self._dynamic_config._need_restart = True
    if debug:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("backtracking due to slot conflict:")
        msg.append("   first package:  %s" % existing_node)
        msg.append("  package to mask: %s" % to_be_masked)
        msg.append("      slot: %s" % slot_atom)
        msg.append("   parents: %s" % ", ".join( \
            "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
        msg.append("")
        writemsg_level("".join("%s\n" % l for l in msg),
            noiselevel=-1, level=logging.DEBUG)
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
    """
    If one or more conflict atoms have a slot/sub-slot dep that can be resolved
    by rebuilding the parent package, then schedule the rebuild via
    backtracking, and return True. Otherwise, return False.

    @param pkg: the conflict package being considered
    @param slot_nodes: all packages pulled into the conflicting slot
    @param conflict_atoms: mapping of (parent, atom) -> set of
        non-matching packages
    @rtype: bool
    """

    found_update = False
    for parent_atom, conflict_pkgs in conflict_atoms.items():
        parent, atom = parent_atom

        # Only built parent packages can be rebuilt against a
        # different slot/sub-slot.
        if not isinstance(parent, Package):
            continue

        if not parent.built:
            continue

        # Only soname deps and built slot-operator deps are
        # candidates for this kind of rebuild.
        if not atom.soname and not (
            atom.package and atom.slot_operator_built):
            continue

        for other_pkg in slot_nodes:
            if other_pkg in conflict_pkgs:
                continue

            dep = Dependency(atom=atom, child=other_pkg,
                parent=parent, root=pkg.root)

            new_dep = \
                self._slot_operator_update_probe_slot_conflict(dep)
            if new_dep is not None:
                self._slot_operator_update_backtrack(dep,
                    new_dep=new_dep)
                found_update = True

    return found_update
def _slot_change_probe(self, dep):
    """
    Check whether dep.child should be rebuilt due to a change in
    slot/sub-slot without a revision bump (as in bug #456208).

    @rtype: Package or None
    @return: the visible unbuilt ebuild package (same cpv as
        dep.child) whose slot/sub-slot differs from dep.child's,
        or None if no rebuild is warranted. (The historical
        annotation claimed "bool", but the code returns a Package
        instance or None.)
    """
    # Only applies when an unbuilt parent depends on a built child.
    if not (isinstance(dep.parent, Package) and \
        not dep.parent.built and dep.child.built):
        return None

    root_config = self._frozen_config.roots[dep.root]
    matches = []
    try:
        # Prefer the ebuild from the same repo as the built child.
        matches.append(self._pkg(dep.child.cpv, "ebuild",
            root_config, myrepo=dep.child.repo))
    except PackageNotFound:
        pass

    # Find the first acceptable unbuilt counterpart: not runtime
    # masked, not excluded, and visible.
    for unbuilt_child in chain(matches,
        self._iter_match_pkgs(root_config, "ebuild",
        Atom("=%s" % (dep.child.cpv,)))):
        if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
            continue
        if self._frozen_config.excluded_pkgs.findAtomForPackage(
            unbuilt_child,
            modified_use=self._pkg_use_enabled(unbuilt_child)):
            continue
        if not self._pkg_visibility_check(unbuilt_child):
            continue
        break
    else:
        return None

    # No rebuild needed if slot and sub-slot are unchanged.
    if unbuilt_child.slot == dep.child.slot and \
        unbuilt_child.sub_slot == dep.child.sub_slot:
        return None

    return unbuilt_child
def _slot_change_backtrack(self, dep, new_child_slot):
    """
    Schedule backtracking to rebuild dep.child because its
    slot/sub-slot changed (new_child_slot is the replacement
    package found by _slot_change_probe).
    """
    child = dep.child
    if "--debug" in self._frozen_config.myopts:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("backtracking due to slot/sub-slot change:")
        msg.append("   child package:  %s" % child)
        msg.append("   child slot:  %s/%s" %
            (child.slot, child.sub_slot))
        msg.append("   new child:  %s" % new_child_slot)
        msg.append("   new child slot:  %s/%s" %
            (new_child_slot.slot, new_child_slot.sub_slot))
        msg.append("   parent package: %s" % dep.parent)
        msg.append("   atom: %s" % dep.atom)
        msg.append("")
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
    backtrack_infos = self._dynamic_config._backtrack_infos
    config = backtrack_infos.setdefault("config", {})

    # mask unwanted binary packages if necessary
    masks = {}
    if not child.installed:
        masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
    if masks:
        config.setdefault("slot_operator_mask_built", {}).update(masks)

    # trigger replacement of installed packages if necessary
    reinstalls = set()
    if child.installed:
        replacement_atom = self._replace_installed_atom(child)
        if replacement_atom is not None:
            reinstalls.add((child.root, replacement_atom))
    if reinstalls:
        config.setdefault("slot_operator_replace_installed",
            set()).update(reinstalls)

    self._dynamic_config._need_restart = True
1774 - def _slot_operator_update_backtrack(self, dep, new_child_slot=None, 1775 new_dep=None):
1776 if new_child_slot is None: 1777 child = dep.child 1778 else: 1779 child = new_child_slot 1780 if "--debug" in self._frozen_config.myopts: 1781 msg = [] 1782 msg.append("") 1783 msg.append("") 1784 msg.append("backtracking due to missed slot abi update:") 1785 msg.append(" child package: %s" % child) 1786 if new_child_slot is not None: 1787 msg.append(" new child slot package: %s" % new_child_slot) 1788 msg.append(" parent package: %s" % dep.parent) 1789 if new_dep is not None: 1790 msg.append(" new parent pkg: %s" % new_dep.parent) 1791 msg.append(" atom: %s" % dep.atom) 1792 msg.append("") 1793 writemsg_level("\n".join(msg), 1794 noiselevel=-1, level=logging.DEBUG) 1795 backtrack_infos = self._dynamic_config._backtrack_infos 1796 config = backtrack_infos.setdefault("config", {}) 1797 1798 # mask unwanted binary packages if necessary 1799 abi_masks = {} 1800 if new_child_slot is None: 1801 if not child.installed: 1802 abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None 1803 if not dep.parent.installed: 1804 abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None 1805 if abi_masks: 1806 config.setdefault("slot_operator_mask_built", {}).update(abi_masks) 1807 1808 # trigger replacement of installed packages if necessary 1809 abi_reinstalls = set() 1810 if dep.parent.installed: 1811 if new_dep is not None: 1812 replacement_atom = new_dep.parent.slot_atom 1813 else: 1814 replacement_atom = self._replace_installed_atom(dep.parent) 1815 if replacement_atom is not None: 1816 abi_reinstalls.add((dep.parent.root, replacement_atom)) 1817 if new_child_slot is None and child.installed: 1818 replacement_atom = self._replace_installed_atom(child) 1819 if replacement_atom is not None: 1820 abi_reinstalls.add((child.root, replacement_atom)) 1821 if abi_reinstalls: 1822 config.setdefault("slot_operator_replace_installed", 1823 set()).update(abi_reinstalls) 1824 1825 self._dynamic_config._need_restart = True
1826
1827 - def _slot_operator_update_probe_slot_conflict(self, dep):
1828 new_dep = self._slot_operator_update_probe(dep, slot_conflict=True) 1829 1830 if new_dep is not None: 1831 return new_dep 1832 1833 if self._dynamic_config._autounmask is True: 1834 1835 for autounmask_level in self._autounmask_levels(): 1836 1837 new_dep = self._slot_operator_update_probe(dep, 1838 slot_conflict=True, autounmask_level=autounmask_level) 1839 1840 if new_dep is not None: 1841 return new_dep 1842 1843 return None
1844
1845 - def _slot_operator_update_probe(self, dep, new_child_slot=False, 1846 slot_conflict=False, autounmask_level=None):
1847 """ 1848 slot/sub-slot := operators tend to prevent updates from getting pulled in, 1849 since installed packages pull in packages with the slot/sub-slot that they 1850 were built against. Detect this case so that we can schedule rebuilds 1851 and reinstalls when appropriate. 1852 NOTE: This function only searches for updates that involve upgrades 1853 to higher versions, since the logic required to detect when a 1854 downgrade would be desirable is not implemented. 1855 """ 1856 1857 if dep.child.installed and \ 1858 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child, 1859 modified_use=self._pkg_use_enabled(dep.child)): 1860 return None 1861 1862 if dep.parent.installed and \ 1863 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent, 1864 modified_use=self._pkg_use_enabled(dep.parent)): 1865 return None 1866 1867 debug = "--debug" in self._frozen_config.myopts 1868 selective = "selective" in self._dynamic_config.myparams 1869 want_downgrade = None 1870 want_downgrade_parent = None 1871 1872 def check_reverse_dependencies(existing_pkg, candidate_pkg, 1873 replacement_parent=None): 1874 """ 1875 Check if candidate_pkg satisfies all of existing_pkg's non- 1876 slot operator parents. 1877 """ 1878 built_slot_operator_parents = set() 1879 for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []): 1880 if atom.soname or atom.slot_operator_built: 1881 built_slot_operator_parents.add(parent) 1882 1883 for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []): 1884 if isinstance(parent, Package): 1885 if parent in built_slot_operator_parents: 1886 # This parent may need to be rebuilt, so its 1887 # dependencies aren't necessarily relevant. 1888 continue 1889 1890 if replacement_parent is not None and \ 1891 (replacement_parent.slot_atom == parent.slot_atom 1892 or replacement_parent.cpv == parent.cpv): 1893 # This parent is irrelevant because we intend to 1894 # replace it with replacement_parent. 
1895 continue 1896 1897 if any(pkg is not parent and 1898 (pkg.slot_atom == parent.slot_atom or 1899 pkg.cpv == parent.cpv) for pkg in 1900 self._dynamic_config._package_tracker