Package _emerge :: Module depgraph
[hide private]

Source Code for Module _emerge.depgraph

   1  # Copyright 1999-2014 Gentoo Foundation 
   2  # Distributed under the terms of the GNU General Public License v2 
   3   
   4  from __future__ import division, print_function, unicode_literals 
   5   
   6  import collections 
   7  import errno 
   8  import io 
   9  import logging 
  10  import stat 
  11  import sys 
  12  import textwrap 
  13  import warnings 
  14  from collections import deque 
  15  from itertools import chain 
  16   
  17  import portage 
  18  from portage import os, OrderedDict 
  19  from portage import _unicode_decode, _unicode_encode, _encodings 
  20  from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS 
  21  from portage.dbapi import dbapi 
  22  from portage.dbapi.dep_expand import dep_expand 
  23  from portage.dbapi._similar_name_search import similar_name_search 
  24  from portage.dep import Atom, best_match_to_list, extract_affecting_use, \ 
  25          check_required_use, human_readable_required_use, match_from_list, \ 
  26          _repo_separator 
  27  from portage.dep._slot_operator import ignore_built_slot_operator_deps 
  28  from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \ 
  29          _get_eapi_attrs 
  30  from portage.exception import (InvalidAtom, InvalidData, InvalidDependString, 
  31          PackageNotFound, PortageException) 
  32  from portage.output import colorize, create_color_func, \ 
  33          darkgreen, green 
  34  bad = create_color_func("BAD") 
  35  from portage.package.ebuild.config import _get_feature_flags 
  36  from portage.package.ebuild.getmaskingstatus import \ 
  37          _getmaskingstatus, _MaskReason 
  38  from portage._sets import SETPREFIX 
  39  from portage._sets.base import InternalPackageSet 
  40  from portage.util import ConfigProtect, shlex_split, new_protect_filename 
  41  from portage.util import cmp_sort_key, writemsg, writemsg_stdout 
  42  from portage.util import ensure_dirs 
  43  from portage.util import writemsg_level, write_atomic 
  44  from portage.util.digraph import digraph 
  45  from portage.util._async.TaskScheduler import TaskScheduler 
  46  from portage.util._eventloop.EventLoop import EventLoop 
  47  from portage.util._eventloop.global_event_loop import global_event_loop 
  48  from portage.versions import catpkgsplit 
  49   
  50  from _emerge.AtomArg import AtomArg 
  51  from _emerge.Blocker import Blocker 
  52  from _emerge.BlockerCache import BlockerCache 
  53  from _emerge.BlockerDepPriority import BlockerDepPriority 
  54  from .chk_updated_cfg_files import chk_updated_cfg_files 
  55  from _emerge.countdown import countdown 
  56  from _emerge.create_world_atom import create_world_atom 
  57  from _emerge.Dependency import Dependency 
  58  from _emerge.DependencyArg import DependencyArg 
  59  from _emerge.DepPriority import DepPriority 
  60  from _emerge.DepPriorityNormalRange import DepPriorityNormalRange 
  61  from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange 
  62  from _emerge.EbuildMetadataPhase import EbuildMetadataPhase 
  63  from _emerge.FakeVartree import FakeVartree 
  64  from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps 
  65  from _emerge.is_valid_package_atom import insert_category_into_atom, \ 
  66          is_valid_package_atom 
  67  from _emerge.Package import Package 
  68  from _emerge.PackageArg import PackageArg 
  69  from _emerge.PackageVirtualDbapi import PackageVirtualDbapi 
  70  from _emerge.RootConfig import RootConfig 
  71  from _emerge.search import search 
  72  from _emerge.SetArg import SetArg 
  73  from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice 
  74  from _emerge.UnmergeDepPriority import UnmergeDepPriority 
  75  from _emerge.UseFlagDisplay import pkg_use_display 
  76  from _emerge.UserQuery import UserQuery 
  77   
  78  from _emerge.resolver.backtracking import Backtracker, BacktrackParameter 
  79  from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper 
  80  from _emerge.resolver.slot_collision import slot_conflict_handler 
  81  from _emerge.resolver.circular_dependency import circular_dependency_handler 
  82  from _emerge.resolver.output import Display, format_unmatched_atom 
  83   
  84  if sys.hexversion >= 0x3000000: 
  85          basestring = str 
  86          long = int 
  87          _unicode = str 
  88  else: 
  89          _unicode = unicode 
  90   
91 -class _scheduler_graph_config(object):
92 - def __init__(self, trees, pkg_cache, graph, mergelist):
93 self.trees = trees 94 self.pkg_cache = pkg_cache 95 self.graph = graph 96 self.mergelist = mergelist
97
def _wildcard_set(atoms):
	"""
	Build an InternalPackageSet from extended-syntax atom strings.
	A token that does not parse as a bare atom is retried with a
	leading "*/" category wildcard.
	"""
	pkgs = InternalPackageSet(allow_wildcard=True)
	for token in atoms:
		try:
			atom = Atom(token, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			# Assume it's a bare package name; qualify it with a
			# category wildcard and let any new error propagate.
			atom = Atom("*/" + token, allow_wildcard=True, allow_repo=False)
		pkgs.add(atom)
	return pkgs
class _frozen_depgraph_config(object):
	"""
	Depgraph state that never changes across backtracking runs:
	tree/config wiring, digests of command-line options, and the
	wildcard atom sets derived from emerge options.
	"""

	def __init__(self, settings, trees, myopts, params, spinner):
		self.settings = settings
		self.target_root = settings["EROOT"]
		self.myopts = myopts
		# PORTAGE_DEBUG=1 enables extra ebuild debug output.
		self.edebug = 1 if settings.get("PORTAGE_DEBUG", "") == "1" else 0
		self.spinner = spinner
		self.requested_depth = params.get("deep", 0)
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# All Package instances are cached here, shared by all roots.
		self._pkg_cache = {}
		self._highest_license_masked = {}
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = FakeVartree(
				trees[myroot]["root_config"],
				pkg_cache=self._pkg_cache,
				pkg_root_config=self.roots[myroot],
				dynamic_deps=dynamic_deps,
				ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)

		self._required_set_names = set(["world"])

		# Each of these emerge options carries a list of (possibly
		# wildcarded) atom strings; digest each list into a package set.
		for opt, attr in (
				("--exclude", "excluded_pkgs"),
				("--reinstall-atoms", "reinstall_atoms"),
				("--usepkg-exclude", "usepkg_exclude"),
				("--useoldpkg-atoms", "useoldpkg_atoms"),
				("--rebuild-exclude", "rebuild_exclude"),
				("--rebuild-ignore", "rebuild_ignore")):
			tokens = ' '.join(myopts.get(opt, [])).split()
			setattr(self, attr, _wildcard_set(tokens))

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
167
class _depgraph_sets(object):
	"""Per-root bookkeeping for package sets involved in the graph."""

	def __init__(self):
		# All sets added to the graph, keyed by set name.
		self.sets = {}
		# Non-set atoms given as arguments live in a synthetic set.
		self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
		# Union of the atoms from every set above, including the
		# atoms given as arguments.
		self.atoms = InternalPackageSet(allow_repo=True)
		self.atom_arg_map = {}
class _rebuild_config(object):
	"""
	Tracks which packages must be rebuilt or reinstalled in order to
	honor the --rebuild-if-new-rev/--rebuild-if-new-ver/
	--rebuild-if-unbuilt emerge options.
	"""

	def __init__(self, frozen_config, backtrack_parameters):
		self._graph = digraph()
		self._frozen_config = frozen_config
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any of the --rebuild-if-* triggers is active.
		self.rebuild = (self.rebuild_if_new_rev
			or self.rebuild_if_new_ver
			or self.rebuild_if_unbuilt)

	def add(self, dep_pkg, dep):
		"""
		Record a build-time edge from dep_pkg to its built parent,
		unless either end is filtered by --rebuild-exclude or
		--rebuild-ignore.
		"""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		frozen = self._frozen_config
		if (self.rebuild
			and isinstance(parent, Package)
			and parent.built
			and priority.buildtime
			and isinstance(dep_pkg, Package)
			and not frozen.rebuild_exclude.findAtomForPackage(parent)
			and not frozen.rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)

	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
			return False

		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild.
			return True

		vardb = self._frozen_config.trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		return all(catpkgsplit(inst_cpv)[:-1] != cpv_norev
			for inst_cpv in vardb.match(dep_pkg.slot_atom))

	def _trigger_rebuild(self, parent, build_deps):
		"""
		Decide whether parent must be rebuilt from source (returns
		True, adding it to rebuild_list) or merely reinstalled from a
		binary (returns True via reinstall_list), given its build deps
		keyed by slot atom.
		"""
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
			return False
		trees = self._frozen_config.trees
		reinstall = False
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
				return True
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):

				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				#    consistent.
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
							if uri == dep_uri:
								break
				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					#    built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
					return True
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					try:
						bin_build_time, = bindb.aux_get(parent.cpv,
							["BUILD_TIME"])
					except KeyError:
						continue
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local
						#    package is not up to date. Force reinstall.
						reinstall = True
		if reinstall:
			self.reinstall_list.add(root_slot)
		return reinstall

	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.
		"""
		need_restart = False
		graph = self._graph
		build_deps = {}

		leaf_nodes = deque(graph.leaf_nodes())

		# Trigger rebuilds bottom-up (starting with the leaves) so that
		# parents will always know which children are being rebuilt.
		while graph:
			if not leaf_nodes:
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])

			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
				continue
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			graph.remove(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				if parent == node:
					# Ignore a direct cycle.
					continue
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our
			# children have been processed, the build_deps will be
			# completely filled in, and self.rebuild_list /
			# self.reinstall_list will tell us whether any of our
			# children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
				need_restart = True

		return need_restart
339 340
class _dynamic_depgraph_config(object):
	"""
	Depgraph state that is recreated for every backtracking run,
	seeded from the given BacktrackParameter instance.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		# Contains the args created by select_files.
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# Manages sets added to the graph.
		self.sets = {}
		# Contains all nodes pulled in by self.sets.
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges.
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges.
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges.
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges.
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges.
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This is used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		self._parent_atoms = {}
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}
		self._highest_pkg_cache_cp_map = {}
		self._flatten_atoms_cache = {}

		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}

		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
		self._prune_rebuilds = backtrack_parameters.prune_rebuilds
		self._need_restart = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._buildpkgonly_deps_unsatisfied = False
		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._success_without_autounmask = False
		self._required_use_unsatisfied = False
		self._traverse_ignored_deps = False
		self._complete_mode = False
		self._slot_operator_deps = {}
		self._package_tracker = PackageTracker()
		# Track missed updates caused by solved conflicts.
		self._conflict_missed_update = collections.defaultdict(dict)

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)

			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph
			self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg

			def filtered_tree():
				pass
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]
			self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg

			dbs = []
			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
			else:
				if "--usepkgonly" not in depgraph._frozen_config.myopts:
					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
					db_keys = list(portdb._aux_cache_keys)
					dbs.append((portdb, "ebuild", False, False, db_keys))

				if "--usepkg" in depgraph._frozen_config.myopts:
					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
					db_keys = list(bindb._aux_cache_keys)
					dbs.append((bindb, "binary", True, False, db_keys))

			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):

	# Represents the depth of a node that is unreachable from explicit
	# user arguments (or their deep dependencies). Such nodes are pulled
	# in by the _complete_graph method.
	_UNREACHABLE_DEPTH = object()

	pkg_tree_map = RootConfig.pkg_tree_map
516 - def __init__(self, settings, trees, myopts, myparams, spinner, 517 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
518 if frozen_config is None: 519 frozen_config = _frozen_depgraph_config(settings, trees, 520 myopts, myparams, spinner) 521 self._frozen_config = frozen_config 522 self._dynamic_config = _dynamic_depgraph_config(self, myparams, 523 allow_backtracking, backtrack_parameters) 524 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters) 525 526 self._select_atoms = self._select_atoms_highest_available 527 self._select_package = self._select_pkg_highest_available 528 529 self._event_loop = (portage._internal_caller and 530 global_event_loop() or EventLoop(main=False)) 531 532 self._select_atoms_parent = None 533 534 self.query = UserQuery(myopts).query
535
536 - def _load_vdb(self):
537 """ 538 Load installed package metadata if appropriate. This used to be called 539 from the constructor, but that wasn't very nice since this procedure 540 is slow and it generates spinner output. So, now it's called on-demand 541 by various methods when necessary. 542 """ 543 544 if self._dynamic_config._vdb_loaded: 545 return 546 547 for myroot in self._frozen_config.trees: 548 549 dynamic_deps = self._dynamic_config.myparams.get( 550 "dynamic_deps", "y") != "n" 551 preload_installed_pkgs = \ 552 "--nodeps" not in self._frozen_config.myopts 553 554 fake_vartree = self._frozen_config.trees[myroot]["vartree"] 555 if not fake_vartree.dbapi: 556 # This needs to be called for the first depgraph, but not for 557 # backtracking depgraphs that share the same frozen_config. 558 fake_vartree.sync() 559 560 # FakeVartree.sync() populates virtuals, and we want 561 # self.pkgsettings to have them populated too. 562 self._frozen_config.pkgsettings[myroot] = \ 563 portage.config(clone=fake_vartree.settings) 564 565 if preload_installed_pkgs: 566 vardb = fake_vartree.dbapi 567 568 if not dynamic_deps: 569 for pkg in vardb: 570 self._dynamic_config._package_tracker.add_installed_pkg(pkg) 571 else: 572 max_jobs = self._frozen_config.myopts.get("--jobs") 573 max_load = self._frozen_config.myopts.get("--load-average") 574 scheduler = TaskScheduler( 575 self._dynamic_deps_preload(fake_vartree), 576 max_jobs=max_jobs, 577 max_load=max_load, 578 event_loop=fake_vartree._portdb._event_loop) 579 scheduler.start() 580 scheduler.wait() 581 582 self._dynamic_config._vdb_loaded = True
583
584 - def _dynamic_deps_preload(self, fake_vartree):
585 portdb = fake_vartree._portdb 586 for pkg in fake_vartree.dbapi: 587 self._spinner_update() 588 self._dynamic_config._package_tracker.add_installed_pkg(pkg) 589 ebuild_path, repo_path = \ 590 portdb.findname2(pkg.cpv, myrepo=pkg.repo) 591 if ebuild_path is None: 592 fake_vartree.dynamic_deps_preload(pkg, None) 593 continue 594 metadata, ebuild_hash = portdb._pull_valid_cache( 595 pkg.cpv, ebuild_path, repo_path) 596 if metadata is not None: 597 fake_vartree.dynamic_deps_preload(pkg, metadata) 598 else: 599 proc = EbuildMetadataPhase(cpv=pkg.cpv, 600 ebuild_hash=ebuild_hash, 601 portdb=portdb, repo_path=repo_path, 602 settings=portdb.doebuild_settings) 603 proc.addExitListener( 604 self._dynamic_deps_proc_exit(pkg, fake_vartree)) 605 yield proc
606
607 - class _dynamic_deps_proc_exit(object):
608 609 __slots__ = ('_pkg', '_fake_vartree') 610
611 - def __init__(self, pkg, fake_vartree):
612 self._pkg = pkg 613 self._fake_vartree = fake_vartree
614
615 - def __call__(self, proc):
616 metadata = None 617 if proc.returncode == os.EX_OK: 618 metadata = proc.metadata 619 self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
620
621 - def _spinner_update(self):
622 if self._frozen_config.spinner: 623 self._frozen_config.spinner.update()
624
625 - def _compute_abi_rebuild_info(self):
626 """ 627 Fill self._forced_rebuilds with packages that cause rebuilds. 628 """ 629 630 debug = "--debug" in self._frozen_config.myopts 631 632 # Get all atoms that might have caused a forced rebuild. 633 atoms = {} 634 for s in self._dynamic_config._initial_arg_list: 635 if s.force_reinstall: 636 root = s.root_config.root 637 atoms.setdefault(root, set()).update(s.pset) 638 639 if debug: 640 writemsg_level("forced reinstall atoms:\n", 641 level=logging.DEBUG, noiselevel=-1) 642 643 for root in atoms: 644 writemsg_level(" root: %s\n" % root, 645 level=logging.DEBUG, noiselevel=-1) 646 for atom in atoms[root]: 647 writemsg_level(" atom: %s\n" % atom, 648 level=logging.DEBUG, noiselevel=-1) 649 writemsg_level("\n\n", 650 level=logging.DEBUG, noiselevel=-1) 651 652 # Go through all slot operator deps and check if one of these deps 653 # has a parent that is matched by one of the atoms from above. 654 forced_rebuilds = {} 655 656 for root, rebuild_atoms in atoms.items(): 657 658 for slot_atom in rebuild_atoms: 659 660 inst_pkg, reinst_pkg = \ 661 self._select_pkg_from_installed(root, slot_atom) 662 663 if inst_pkg is reinst_pkg or reinst_pkg is None: 664 continue 665 666 # Generate pseudo-deps for any slot-operator deps of 667 # inst_pkg. Its deps aren't in _slot_operator_deps 668 # because it hasn't been added to the graph, but we 669 # are interested in any rebuilds that it triggered. 670 built_slot_op_atoms = [] 671 if inst_pkg is not None: 672 selected_atoms = self._select_atoms_probe( 673 inst_pkg.root, inst_pkg) 674 for atom in selected_atoms: 675 if atom.slot_operator_built: 676 built_slot_op_atoms.append(atom) 677 678 if not built_slot_op_atoms: 679 continue 680 681 # Use a cloned list, since we may append to it below. 
682 deps = self._dynamic_config._slot_operator_deps.get( 683 (root, slot_atom), [])[:] 684 685 if built_slot_op_atoms and reinst_pkg is not None: 686 for child in self._dynamic_config.digraph.child_nodes( 687 reinst_pkg): 688 689 if child.installed: 690 continue 691 692 for atom in built_slot_op_atoms: 693 # NOTE: Since atom comes from inst_pkg, and 694 # reinst_pkg is the replacement parent, there's 695 # no guarantee that atom will completely match 696 # child. So, simply use atom.cp and atom.slot 697 # for matching. 698 if atom.cp != child.cp: 699 continue 700 if atom.slot and atom.slot != child.slot: 701 continue 702 deps.append(Dependency(atom=atom, child=child, 703 root=child.root, parent=reinst_pkg)) 704 705 for dep in deps: 706 if dep.child.installed: 707 # Find the replacement child. 708 child = next((pkg for pkg in 709 self._dynamic_config._package_tracker.match( 710 dep.root, dep.child.slot_atom) 711 if not pkg.installed), None) 712 713 if child is None: 714 continue 715 716 inst_child = dep.child.installed 717 718 else: 719 child = dep.child 720 inst_child = self._select_pkg_from_installed( 721 child.root, child.slot_atom)[0] 722 723 # Make sure the child's slot/subslot has changed. If it 724 # hasn't, then another child has forced this rebuild. 725 if inst_child and inst_child.slot == child.slot and \ 726 inst_child.sub_slot == child.sub_slot: 727 continue 728 729 if dep.parent.installed: 730 # Find the replacement parent. 
731 parent = next((pkg for pkg in 732 self._dynamic_config._package_tracker.match( 733 dep.parent.root, dep.parent.slot_atom) 734 if not pkg.installed), None) 735 736 if parent is None: 737 continue 738 739 else: 740 parent = dep.parent 741 742 # The child has forced a rebuild of the parent 743 forced_rebuilds.setdefault(root, {} 744 ).setdefault(child, set()).add(parent) 745 746 if debug: 747 writemsg_level("slot operator dependencies:\n", 748 level=logging.DEBUG, noiselevel=-1) 749 750 for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items(): 751 writemsg_level(" (%s, %s)\n" % \ 752 (root, slot_atom), level=logging.DEBUG, noiselevel=-1) 753 for dep in deps: 754 writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1) 755 writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1) 756 757 writemsg_level("\n\n", 758 level=logging.DEBUG, noiselevel=-1) 759 760 761 writemsg_level("forced rebuilds:\n", 762 level=logging.DEBUG, noiselevel=-1) 763 764 for root in forced_rebuilds: 765 writemsg_level(" root: %s\n" % root, 766 level=logging.DEBUG, noiselevel=-1) 767 for child in forced_rebuilds[root]: 768 writemsg_level(" child: %s\n" % child, 769 level=logging.DEBUG, noiselevel=-1) 770 for parent in forced_rebuilds[root][child]: 771 writemsg_level(" parent: %s\n" % parent, 772 level=logging.DEBUG, noiselevel=-1) 773 writemsg_level("\n\n", 774 level=logging.DEBUG, noiselevel=-1) 775 776 self._forced_rebuilds = forced_rebuilds
777
778 - def _show_abi_rebuild_info(self):
779 780 if not self._forced_rebuilds: 781 return 782 783 writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1) 784 785 for root in self._forced_rebuilds: 786 for child in self._forced_rebuilds[root]: 787 writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1) 788 for parent in self._forced_rebuilds[root][child]: 789 writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
790
791 - def _show_ignored_binaries(self):
792 """ 793 Show binaries that have been ignored because their USE didn't 794 match the user's config. 795 """ 796 if not self._dynamic_config.ignored_binaries \ 797 or '--quiet' in self._frozen_config.myopts \ 798 or self._dynamic_config.myparams.get( 799 "binpkg_respect_use") in ("y", "n"): 800 return 801 802 for pkg in list(self._dynamic_config.ignored_binaries): 803 804 selected_pkg = list() 805 806 for selected_pkg in self._dynamic_config._package_tracker.match( 807 pkg.root, pkg.slot_atom): 808 809 if selected_pkg > pkg: 810 self._dynamic_config.ignored_binaries.pop(pkg) 811 break 812 813 if selected_pkg.installed and \ 814 selected_pkg.cpv == pkg.cpv and \ 815 selected_pkg.build_time == pkg.build_time: 816 # We don't care about ignored binaries when an 817 # identical installed instance is selected to 818 # fill the slot. 819 self._dynamic_config.ignored_binaries.pop(pkg) 820 break 821 822 if not self._dynamic_config.ignored_binaries: 823 return 824 825 self._show_merge_list() 826 827 writemsg("\n!!! The following binary packages have been ignored " + \ 828 "due to non matching USE:\n\n", noiselevel=-1) 829 830 for pkg, flags in self._dynamic_config.ignored_binaries.items(): 831 flag_display = [] 832 for flag in sorted(flags): 833 if flag not in pkg.use.enabled: 834 flag = "-" + flag 835 flag_display.append(flag) 836 flag_display = " ".join(flag_display) 837 # The user can paste this line into package.use 838 writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1) 839 if pkg.root_config.settings["ROOT"] != "/": 840 writemsg(" # for %s" % (pkg.root,), noiselevel=-1) 841 writemsg("\n", noiselevel=-1) 842 843 msg = [ 844 "", 845 "NOTE: The --binpkg-respect-use=n option will prevent emerge", 846 " from ignoring these binary packages if possible.", 847 " Using --binpkg-respect-use=y will silence this warning." 848 ] 849 850 for line in msg: 851 if line: 852 line = colorize("INFORM", line) 853 writemsg(line + "\n", noiselevel=-1)
854
855 - def _get_missed_updates(self):
856 857 # In order to minimize noise, show only the highest 858 # missed update from each SLOT. 859 missed_updates = {} 860 for pkg, mask_reasons in \ 861 chain(self._dynamic_config._runtime_pkg_mask.items(), 862 self._dynamic_config._conflict_missed_update.items()): 863 if pkg.installed: 864 # Exclude installed here since we only 865 # want to show available updates. 866 continue 867 missed_update = True 868 any_selected = False 869 for chosen_pkg in self._dynamic_config._package_tracker.match( 870 pkg.root, pkg.slot_atom): 871 any_selected = True 872 if chosen_pkg > pkg or (not chosen_pkg.installed and \ 873 chosen_pkg.version == pkg.version): 874 missed_update = False 875 break 876 if any_selected and missed_update: 877 k = (pkg.root, pkg.slot_atom) 878 if k in missed_updates: 879 other_pkg, mask_type, parent_atoms = missed_updates[k] 880 if other_pkg > pkg: 881 continue 882 for mask_type, parent_atoms in mask_reasons.items(): 883 if not parent_atoms: 884 continue 885 missed_updates[k] = (pkg, mask_type, parent_atoms) 886 break 887 888 return missed_updates
889
890 - def _show_missed_update(self):
891 892 missed_updates = self._get_missed_updates() 893 894 if not missed_updates: 895 return 896 897 missed_update_types = {} 898 for pkg, mask_type, parent_atoms in missed_updates.values(): 899 missed_update_types.setdefault(mask_type, 900 []).append((pkg, parent_atoms)) 901 902 if '--quiet' in self._frozen_config.myopts and \ 903 '--debug' not in self._frozen_config.myopts: 904 missed_update_types.pop("slot conflict", None) 905 missed_update_types.pop("missing dependency", None) 906 907 self._show_missed_update_slot_conflicts( 908 missed_update_types.get("slot conflict")) 909 910 self._show_missed_update_unsatisfied_dep( 911 missed_update_types.get("missing dependency"))
912
	def _show_missed_update_unsatisfied_dep(self, missed_updates):
		"""
		Display updates that were skipped because of unsatisfied
		dependencies, including the detailed unsatisfied-dependency
		output for each parent atom.

		@param missed_updates: list of (pkg, parent_atoms) pairs, where
			parent_atoms is an iterable of (parent, root, atom) tuples;
			may be None or empty, in which case nothing is shown
		"""
		if not missed_updates:
			return

		self._show_merge_list()
		backtrack_masked = []

		for pkg, parent_atoms in missed_updates:

			try:
				# Dry run with check_backtrack=True: raises
				# self._backtrack_mask for backtracking-triggered masks
				# before any output is produced, so those updates can be
				# reported in abbreviated form below instead.
				for parent, root, atom in parent_atoms:
					self._show_unsatisfied_dep(root, atom, myparent=parent,
						check_backtrack=True)
			except self._backtrack_mask:
				# This is displayed below in abbreviated form.
				backtrack_masked.append((pkg, parent_atoms))
				continue

			writemsg("\n!!! The following update has been skipped " + \
				"due to unsatisfied dependencies:\n\n", noiselevel=-1)

			writemsg(str(pkg.slot_atom), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)

			# Second pass actually prints the full dependency details.
			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent)
				writemsg("\n", noiselevel=-1)

		if backtrack_masked:
			# These are shown in abbreviated form, in order to avoid terminal
			# flooding from mask messages as reported in bug #285832.
			writemsg("\n!!! The following update(s) have been skipped " + \
				"due to unsatisfied dependencies\n" + \
				"!!! triggered by backtracking:\n\n", noiselevel=-1)
			for pkg, parent_atoms in backtrack_masked:
				writemsg(str(pkg.slot_atom), noiselevel=-1)
				if pkg.root_config.settings["ROOT"] != "/":
					writemsg(" for %s" % (pkg.root,), noiselevel=-1)
				writemsg("\n", noiselevel=-1)
	def _show_missed_update_slot_conflicts(self, missed_updates):
		"""
		Display updates that were skipped due to slot conflicts, listing
		for each skipped package the parent atoms it conflicts with.

		@param missed_updates: list of (pkg, parent_atoms) pairs, where
			parent_atoms is an iterable of (parent, atom) tuples; may be
			None or empty, in which case nothing is shown
		"""
		if not missed_updates:
			return

		self._show_merge_list()
		msg = []
		msg.append("\nWARNING: One or more updates/rebuilds have been " + \
			"skipped due to a dependency conflict:\n\n")

		indent = " "
		for pkg, parent_atoms in missed_updates:
			msg.append(str(pkg.slot_atom))
			if pkg.root_config.settings["ROOT"] != "/":
				msg.append(" for %s" % (pkg.root,))
			msg.append("\n\n")

			msg.append(indent)
			msg.append(str(pkg))
			msg.append(" conflicts with\n")

			for parent, atom in parent_atoms:
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(2*indent)
					msg.append(str(parent))
					msg.append("\n")
				else:
					# Display the specific atom from SetArg or
					# Package types.
					atom, marker = format_unmatched_atom(
						pkg, atom, self._pkg_use_enabled)

					msg.append(2*indent)
					msg.append("%s required by %s\n" % (atom, parent))
					msg.append(2*indent)
					msg.append(marker)
					msg.append("\n")
			msg.append("\n")

		writemsg("".join(msg), noiselevel=-1)
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		cases.
		"""

		if not any(self._dynamic_config._package_tracker.slot_conflicts()):
			return

		self._show_merge_list()

		self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
		handler = self._dynamic_config._slot_conflict_handler

		conflict = handler.get_conflict()
		writemsg(conflict, noiselevel=-1)

		explanation = handler.get_explanation()
		if explanation:
			# The handler found a concrete suggestion; show it and skip
			# the generic advice below.
			writemsg(explanation, noiselevel=-1)
			return

		if "--quiet" in self._frozen_config.myopts:
			return

		msg = []
		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")
		backtrack_opt = self._frozen_config.myopts.get('--backtrack')
		# Suggest a larger --backtrack value when backtracking is
		# exhausted and the configured limit is below the suggested 30.
		if not self._dynamic_config._allow_backtracking and \
			(backtrack_opt is None or \
			(backtrack_opt > 0 and backtrack_opt < 30)):
			msg.append(" You may want to try a larger value of the ")
			msg.append("--backtrack option, such as --backtrack=30, ")
			msg.append("in order to see if that will solve this conflict ")
			msg.append("automatically.")

		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)

		msg = []
		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)
1058 """ 1059 This function solves slot conflicts which can 1060 be solved by simply choosing one of the conflicting 1061 and removing all the other ones. 1062 It is able to solve somewhat more complex cases where 1063 conflicts can only be solved simultaniously. 1064 """ 1065 debug = "--debug" in self._frozen_config.myopts 1066 1067 # List all conflicts. Ignore those that involve slot operator rebuilds 1068 # as the logic there needs special slot conflict behavior which isn't 1069 # provided by this function. 1070 conflicts = [] 1071 for conflict in self._dynamic_config._package_tracker.slot_conflicts(): 1072 slot_key = conflict.root, conflict.atom 1073 if slot_key not in self._dynamic_config._slot_operator_replace_installed: 1074 conflicts.append(conflict) 1075 1076 if not conflicts: 1077 return 1078 1079 if debug: 1080 writemsg_level( 1081 "\n!!! Slot conflict handler started.\n", 1082 level=logging.DEBUG, noiselevel=-1) 1083 1084 # Get a set of all conflicting packages. 1085 conflict_pkgs = set() 1086 for conflict in conflicts: 1087 conflict_pkgs.update(conflict) 1088 1089 # Get the list of other packages which are only 1090 # required by conflict packages. 
1091 indirect_conflict_candidates = set() 1092 for pkg in conflict_pkgs: 1093 indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg)) 1094 indirect_conflict_candidates.difference_update(conflict_pkgs) 1095 1096 indirect_conflict_pkgs = set() 1097 while indirect_conflict_candidates: 1098 pkg = indirect_conflict_candidates.pop() 1099 1100 only_conflict_parents = True 1101 for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): 1102 if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs: 1103 only_conflict_parents = False 1104 break 1105 if not only_conflict_parents: 1106 continue 1107 1108 indirect_conflict_pkgs.add(pkg) 1109 for child in self._dynamic_config.digraph.child_nodes(pkg): 1110 if child in conflict_pkgs or child in indirect_conflict_pkgs: 1111 continue 1112 indirect_conflict_candidates.add(child) 1113 1114 # Create a graph containing the conflict packages 1115 # and a special 'non_conflict_node' that represents 1116 # all non-conflict packages. 1117 conflict_graph = digraph() 1118 1119 non_conflict_node = "(non-conflict package)" 1120 conflict_graph.add(non_conflict_node, None) 1121 1122 for pkg in chain(conflict_pkgs, indirect_conflict_pkgs): 1123 conflict_graph.add(pkg, None) 1124 1125 # Add parent->child edges for each conflict package. 1126 # Parents, which aren't conflict packages are represented 1127 # by 'non_conflict_node'. 1128 # If several conflicting packages are matched, but not all, 1129 # add a tuple with the matched packages to the graph. 1130 class or_tuple(tuple): 1131 """ 1132 Helper class for debug printing. 1133 """ 1134 def __str__(self): 1135 return "(%s)" % ",".join(str(pkg) for pkg in self)
1136 1137 non_matching_forced = set() 1138 for conflict in conflicts: 1139 if debug: 1140 writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1) 1141 writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1) 1142 writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1) 1143 for pkg in conflict: 1144 writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1) 1145 1146 all_parent_atoms = set() 1147 for pkg in conflict: 1148 all_parent_atoms.update( 1149 self._dynamic_config._parent_atoms.get(pkg, [])) 1150 1151 for parent, atom in all_parent_atoms: 1152 is_arg_parent = isinstance(parent, AtomArg) 1153 is_non_conflict_parent = parent not in conflict_pkgs and \ 1154 parent not in indirect_conflict_pkgs 1155 1156 if debug: 1157 writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1) 1158 writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent), 1159 level=logging.DEBUG, noiselevel=-1) 1160 writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1) 1161 1162 if is_non_conflict_parent: 1163 parent = non_conflict_node 1164 1165 atom_set = InternalPackageSet( 1166 initial_atoms=(atom,), allow_repo=True) 1167 1168 matched = [] 1169 for pkg in conflict: 1170 if atom_set.findAtomForPackage(pkg, \ 1171 modified_use=self._pkg_use_enabled(pkg)) and \ 1172 not (is_arg_parent and pkg.installed): 1173 matched.append(pkg) 1174 1175 if debug: 1176 for match in matched: 1177 writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1) 1178 1179 if len(matched) > 1: 1180 # Even if all packages match, this parent must still 1181 # be added to the conflict_graph. Otherwise, we risk 1182 # removing all of these packages from the depgraph, 1183 # which could cause a missed update (bug #522084). 
1184 conflict_graph.add(or_tuple(matched), parent) 1185 elif len(matched) == 1: 1186 conflict_graph.add(matched[0], parent) 1187 else: 1188 # This typically means that autounmask broke a 1189 # USE-dep, but it could also be due to the slot 1190 # not matching due to multislot (bug #220341). 1191 # Either way, don't try to solve this conflict. 1192 # Instead, force them all into the graph so that 1193 # they are protected from removal. 1194 non_matching_forced.update(conflict) 1195 if debug: 1196 for pkg in conflict: 1197 writemsg_level(" non-match: %s\n" % pkg, 1198 level=logging.DEBUG, noiselevel=-1) 1199 1200 for pkg in indirect_conflict_pkgs: 1201 for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): 1202 if parent not in conflict_pkgs and \ 1203 parent not in indirect_conflict_pkgs: 1204 parent = non_conflict_node 1205 conflict_graph.add(pkg, parent) 1206 1207 if debug: 1208 writemsg_level( 1209 "\n!!! Slot conflict graph:\n", 1210 level=logging.DEBUG, noiselevel=-1) 1211 conflict_graph.debug_print() 1212 1213 # Now select required packages. Collect them in the 1214 # 'forced' set. 1215 forced = set([non_conflict_node]) 1216 forced.update(non_matching_forced) 1217 unexplored = set([non_conflict_node]) 1218 # or_tuples get special handling. We first explore 1219 # all packages in the hope of having forced one of 1220 # the packages in the tuple. This way we don't have 1221 # to choose one. 1222 unexplored_tuples = set() 1223 1224 while unexplored: 1225 # Handle all unexplored packages. 1226 while unexplored: 1227 node = unexplored.pop() 1228 for child in conflict_graph.child_nodes(node): 1229 if child in forced: 1230 continue 1231 forced.add(child) 1232 if isinstance(child, Package): 1233 unexplored.add(child) 1234 else: 1235 unexplored_tuples.add(child) 1236 1237 # Now handle unexplored or_tuples. Move on with packages 1238 # once we had to choose one. 
1239 while unexplored_tuples: 1240 nodes = unexplored_tuples.pop() 1241 if any(node in forced for node in nodes): 1242 # At least one of the packages in the 1243 # tuple is already forced, which means the 1244 # dependency represented by this tuple 1245 # is satisfied. 1246 continue 1247 1248 # We now have to choose one of packages in the tuple. 1249 # In theory one could solve more conflicts if we'd be 1250 # able to try different choices here, but that has lots 1251 # of other problems. For now choose the package that was 1252 # pulled first, as this should be the most desirable choice 1253 # (otherwise it wouldn't have been the first one). 1254 forced.add(nodes[0]) 1255 unexplored.add(nodes[0]) 1256 break 1257 1258 # Remove 'non_conflict_node' and or_tuples from 'forced'. 1259 forced = set(pkg for pkg in forced if isinstance(pkg, Package)) 1260 non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced) 1261 1262 if debug: 1263 writemsg_level( 1264 "\n!!! Slot conflict solution:\n", 1265 level=logging.DEBUG, noiselevel=-1) 1266 for conflict in conflicts: 1267 writemsg_level( 1268 " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom), 1269 level=logging.DEBUG, noiselevel=-1) 1270 for pkg in conflict: 1271 if pkg in forced: 1272 writemsg_level( 1273 " keep: %s\n" % pkg, 1274 level=logging.DEBUG, noiselevel=-1) 1275 else: 1276 writemsg_level( 1277 " remove: %s\n" % pkg, 1278 level=logging.DEBUG, noiselevel=-1) 1279 1280 broken_packages = set() 1281 for pkg in non_forced: 1282 for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): 1283 if isinstance(parent, Package) and parent not in non_forced: 1284 # Non-forcing set args are expected to be a parent of all 1285 # packages in the conflict. 1286 broken_packages.add(parent) 1287 self._remove_pkg(pkg) 1288 1289 # Process the dependencies of choosen conflict packages 1290 # again to properly account for blockers. 
1291 broken_packages.update(forced) 1292 1293 # Filter out broken packages which have been removed during 1294 # recursive removal in self._remove_pkg. 1295 broken_packages = list(pkg for pkg in broken_packages if pkg in broken_packages \ 1296 if self._dynamic_config._package_tracker.contains(pkg, installed=False)) 1297 1298 self._dynamic_config._dep_stack.extend(broken_packages) 1299 1300 if broken_packages: 1301 # Process dependencies. This cannot fail because we just ensured that 1302 # the remaining packages satisfy all dependencies. 1303 self._create_graph() 1304 1305 # Record missed updates. 1306 for conflict in conflicts: 1307 if not any(pkg in non_forced for pkg in conflict): 1308 continue 1309 for pkg in conflict: 1310 if pkg not in non_forced: 1311 continue 1312 1313 for other in conflict: 1314 if other is pkg: 1315 continue 1316 1317 for parent, atom in self._dynamic_config._parent_atoms.get(other, []): 1318 atom_set = InternalPackageSet( 1319 initial_atoms=(atom,), allow_repo=True) 1320 if not atom_set.findAtomForPackage(pkg, 1321 modified_use=self._pkg_use_enabled(pkg)): 1322 self._dynamic_config._conflict_missed_update[pkg].setdefault( 1323 "slot conflict", set()) 1324 self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add( 1325 (parent, atom)) 1326 1327
1328 - def _process_slot_conflicts(self):
1329 """ 1330 If there are any slot conflicts and backtracking is enabled, 1331 _complete_graph should complete the graph before this method 1332 is called, so that all relevant reverse dependencies are 1333 available for use in backtracking decisions. 1334 """ 1335 1336 self._solve_non_slot_operator_slot_conflicts() 1337 1338 for conflict in self._dynamic_config._package_tracker.slot_conflicts(): 1339 self._process_slot_conflict(conflict)
1340
	def _process_slot_conflict(self, conflict):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.

		@param conflict: a slot conflict object providing root, atom
			and pkgs attributes
		"""
		root = conflict.root
		slot_atom = conflict.atom
		slot_nodes = conflict.pkgs

		debug = "--debug" in self._frozen_config.myopts

		# Union of the parent atoms of all packages pulled into this slot.
		slot_parent_atoms = set()
		for pkg in slot_nodes:
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:
				continue
			slot_parent_atoms.update(parent_atoms)

		conflict_pkgs = []
		conflict_atoms = {}
		for pkg in slot_nodes:

			if self._dynamic_config._allow_backtracking and \
				pkg in self._dynamic_config._runtime_pkg_mask:
				if debug:
					writemsg_level(
						"!!! backtracking loop detected: %s %s\n" % \
						(pkg,
						self._dynamic_config._runtime_pkg_mask[pkg]),
						level=logging.DEBUG, noiselevel=-1)

			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if parent_atoms is None:
				parent_atoms = set()
				self._dynamic_config._parent_atoms[pkg] = parent_atoms

			# Find the parent atoms (pulled in by any package sharing
			# this slot) that this particular package fails to match.
			all_match = True
			for parent_atom in slot_parent_atoms:
				if parent_atom in parent_atoms:
					continue
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				parent, atom = parent_atom
				atom_set = InternalPackageSet(
					initial_atoms=(atom,), allow_repo=True)
				if atom_set.findAtomForPackage(pkg,
					modified_use=self._pkg_use_enabled(pkg)):
					parent_atoms.add(parent_atom)
				else:
					all_match = False
					conflict_atoms.setdefault(parent_atom, set()).add(pkg)

			if not all_match:
				conflict_pkgs.append(pkg)

		# Try to resolve the conflict via backtracking: first by
		# scheduling slot operator (ABI) rebuilds, then by masking the
		# remaining conflict packages.
		if conflict_pkgs and \
			self._dynamic_config._allow_backtracking and \
			not self._accept_blocker_conflicts():
			remaining = []
			for pkg in conflict_pkgs:
				if self._slot_conflict_backtrack_abi(pkg,
					slot_nodes, conflict_atoms):
					backtrack_infos = self._dynamic_config._backtrack_infos
					config = backtrack_infos.setdefault("config", {})
					config.setdefault("slot_conflict_abi", set()).add(pkg)
				else:
					remaining.append(pkg)
			if remaining:
				self._slot_confict_backtrack(root, slot_atom,
					slot_parent_atoms, remaining)
1413 - def _slot_confict_backtrack(self, root, slot_atom, 1414 all_parents, conflict_pkgs):
1415 1416 debug = "--debug" in self._frozen_config.myopts 1417 existing_node = next(self._dynamic_config._package_tracker.match( 1418 root, slot_atom, installed=False)) 1419 # In order to avoid a missed update, first mask lower versions 1420 # that conflict with higher versions (the backtracker visits 1421 # these in reverse order). 1422 conflict_pkgs.sort(reverse=True) 1423 backtrack_data = [] 1424 for to_be_masked in conflict_pkgs: 1425 # For missed update messages, find out which 1426 # atoms matched to_be_selected that did not 1427 # match to_be_masked. 1428 parent_atoms = \ 1429 self._dynamic_config._parent_atoms.get(to_be_masked, set()) 1430 conflict_atoms = set(parent_atom for parent_atom in all_parents \ 1431 if parent_atom not in parent_atoms) 1432 backtrack_data.append((to_be_masked, conflict_atoms)) 1433 1434 to_be_masked = backtrack_data[-1][0] 1435 1436 self._dynamic_config._backtrack_infos.setdefault( 1437 "slot conflict", []).append(backtrack_data) 1438 self._dynamic_config._need_restart = True 1439 if debug: 1440 msg = [] 1441 msg.append("") 1442 msg.append("") 1443 msg.append("backtracking due to slot conflict:") 1444 msg.append(" first package: %s" % existing_node) 1445 msg.append(" package to mask: %s" % to_be_masked) 1446 msg.append(" slot: %s" % slot_atom) 1447 msg.append(" parents: %s" % ", ".join( \ 1448 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents)) 1449 msg.append("") 1450 writemsg_level("".join("%s\n" % l for l in msg), 1451 noiselevel=-1, level=logging.DEBUG)
1452
1453 - def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
1454 """ 1455 If one or more conflict atoms have a slot/sub-slot dep that can be resolved 1456 by rebuilding the parent package, then schedule the rebuild via 1457 backtracking, and return True. Otherwise, return False. 1458 """ 1459 1460 found_update = False 1461 for parent_atom, conflict_pkgs in conflict_atoms.items(): 1462 parent, atom = parent_atom 1463 1464 if not isinstance(parent, Package): 1465 continue 1466 1467 if atom.slot_operator != "=" or not parent.built: 1468 continue 1469 1470 if pkg not in conflict_pkgs: 1471 continue 1472 1473 for other_pkg in slot_nodes: 1474 if other_pkg in conflict_pkgs: 1475 continue 1476 1477 dep = Dependency(atom=atom, child=other_pkg, 1478 parent=parent, root=pkg.root) 1479 1480 new_dep = \ 1481 self._slot_operator_update_probe_slot_conflict(dep) 1482 if new_dep is not None: 1483 self._slot_operator_update_backtrack(dep, 1484 new_dep=new_dep) 1485 found_update = True 1486 1487 return found_update
1488
	def _slot_change_probe(self, dep):
		"""
		Check whether dep.child should be rebuilt due to a change in
		slot/sub-slot without a revision bump (as in bug #456208).

		@rtype: Package or None
		@return: the unbuilt (ebuild) counterpart of dep.child which has
			a different slot/sub-slot, or None if no rebuild is warranted
		"""
		# Only relevant when an unbuilt parent depends on a built child.
		if not (isinstance(dep.parent, Package) and \
			not dep.parent.built and dep.child.built):
			return None

		root_config = self._frozen_config.roots[dep.root]
		matches = []
		try:
			# Prefer the ebuild from the same repository as the child.
			matches.append(self._pkg(dep.child.cpv, "ebuild",
				root_config, myrepo=dep.child.repo))
		except PackageNotFound:
			pass

		# Pick the first acceptable unbuilt counterpart of the child;
		# the for-else falls through when no candidate is acceptable.
		for unbuilt_child in chain(matches,
			self._iter_match_pkgs(root_config, "ebuild",
			Atom("=%s" % (dep.child.cpv,)))):
			if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
				continue
			if self._frozen_config.excluded_pkgs.findAtomForPackage(
				unbuilt_child,
				modified_use=self._pkg_use_enabled(unbuilt_child)):
				continue
			if not self._pkg_visibility_check(unbuilt_child):
				continue
			break
		else:
			return None

		# An identical slot/sub-slot means nothing changed, so there is
		# no reason to rebuild.
		if unbuilt_child.slot == dep.child.slot and \
			unbuilt_child.sub_slot == dep.child.sub_slot:
			return None

		return unbuilt_child
1528 - def _slot_change_backtrack(self, dep, new_child_slot):
1529 child = dep.child 1530 if "--debug" in self._frozen_config.myopts: 1531 msg = [] 1532 msg.append("") 1533 msg.append("") 1534 msg.append("backtracking due to slot/sub-slot change:") 1535 msg.append(" child package: %s" % child) 1536 msg.append(" child slot: %s/%s" % 1537 (child.slot, child.sub_slot)) 1538 msg.append(" new child: %s" % new_child_slot) 1539 msg.append(" new child slot: %s/%s" % 1540 (new_child_slot.slot, new_child_slot.sub_slot)) 1541 msg.append(" parent package: %s" % dep.parent) 1542 msg.append(" atom: %s" % dep.atom) 1543 msg.append("") 1544 writemsg_level("\n".join(msg), 1545 noiselevel=-1, level=logging.DEBUG) 1546 backtrack_infos = self._dynamic_config._backtrack_infos 1547 config = backtrack_infos.setdefault("config", {}) 1548 1549 # mask unwanted binary packages if necessary 1550 masks = {} 1551 if not child.installed: 1552 masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None 1553 if masks: 1554 config.setdefault("slot_operator_mask_built", {}).update(masks) 1555 1556 # trigger replacement of installed packages if necessary 1557 reinstalls = set() 1558 if child.installed: 1559 replacement_atom = self._replace_installed_atom(child) 1560 if replacement_atom is not None: 1561 reinstalls.add((child.root, replacement_atom)) 1562 if reinstalls: 1563 config.setdefault("slot_operator_replace_installed", 1564 set()).update(reinstalls) 1565 1566 self._dynamic_config._need_restart = True
1567
1568 - def _slot_operator_update_backtrack(self, dep, new_child_slot=None, 1569 new_dep=None):
1570 if new_child_slot is None: 1571 child = dep.child 1572 else: 1573 child = new_child_slot 1574 if "--debug" in self._frozen_config.myopts: 1575 msg = [] 1576 msg.append("") 1577 msg.append("") 1578 msg.append("backtracking due to missed slot abi update:") 1579 msg.append(" child package: %s" % child) 1580 if new_child_slot is not None: 1581 msg.append(" new child slot package: %s" % new_child_slot) 1582 msg.append(" parent package: %s" % dep.parent) 1583 if new_dep is not None: 1584 msg.append(" new parent pkg: %s" % new_dep.parent) 1585 msg.append(" atom: %s" % dep.atom) 1586 msg.append("") 1587 writemsg_level("\n".join(msg), 1588 noiselevel=-1, level=logging.DEBUG) 1589 backtrack_infos = self._dynamic_config._backtrack_infos 1590 config = backtrack_infos.setdefault("config", {}) 1591 1592 # mask unwanted binary packages if necessary 1593 abi_masks = {} 1594 if new_child_slot is None: 1595 if not child.installed: 1596 abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None 1597 if not dep.parent.installed: 1598 abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None 1599 if abi_masks: 1600 config.setdefault("slot_operator_mask_built", {}).update(abi_masks) 1601 1602 # trigger replacement of installed packages if necessary 1603 abi_reinstalls = set() 1604 if dep.parent.installed: 1605 if new_dep is not None: 1606 replacement_atom = new_dep.parent.slot_atom 1607 else: 1608 replacement_atom = self._replace_installed_atom(dep.parent) 1609 if replacement_atom is not None: 1610 abi_reinstalls.add((dep.parent.root, replacement_atom)) 1611 if new_child_slot is None and child.installed: 1612 replacement_atom = self._replace_installed_atom(child) 1613 if replacement_atom is not None: 1614 abi_reinstalls.add((child.root, replacement_atom)) 1615 if abi_reinstalls: 1616 config.setdefault("slot_operator_replace_installed", 1617 set()).update(abi_reinstalls) 1618 1619 self._dynamic_config._need_restart = True
1620
1621 - def _slot_operator_update_probe_slot_conflict(self, dep):
1622 new_dep = self._slot_operator_update_probe(dep, slot_conflict=True) 1623 1624 if new_dep is not None: 1625 return new_dep 1626 1627 if self._dynamic_config._autounmask is True: 1628 1629 for autounmask_level in self._autounmask_levels(): 1630 1631 new_dep = self._slot_operator_update_probe(dep, 1632 slot_conflict=True, autounmask_level=autounmask_level) 1633 1634 if new_dep is not None: 1635 return new_dep 1636 1637 return None
1638
	def _slot_operator_update_probe(self, dep, new_child_slot=False,
		slot_conflict=False, autounmask_level=None):
		"""
		slot/sub-slot := operators tend to prevent updates from getting pulled in,
		since installed packages pull in packages with the slot/sub-slot that they
		were built against. Detect this case so that we can schedule rebuilds
		and reinstalls when appropriate.
		NOTE: This function only searches for updates that involve upgrades
			to higher versions, since the logic required to detect when a
			downgrade would be desirable is not implemented.

		@param dep: the Dependency whose parent/child pair is examined
		@param new_child_slot: if True, search for a child in a different slot
		@param slot_conflict: if True, this probe is made on behalf of a slot
			conflict, which disables the "insignificant sub-slot change"
			shortcut below
		@param autounmask_level: optional autounmask level, passed through
			to _iter_similar_available
		@rtype: Dependency or None
		@return: a replacement Dependency (new parent, child and atom), or
			None if no suitable update was found
		"""

		if dep.child.installed and \
			self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
			modified_use=self._pkg_use_enabled(dep.child)):
			return None

		if dep.parent.installed and \
			self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
			modified_use=self._pkg_use_enabled(dep.parent)):
			return None

		debug = "--debug" in self._frozen_config.myopts
		selective = "selective" in self._dynamic_config.myparams
		# Downgrade desirability is probed lazily and cached for the
		# duration of this call.
		want_downgrade = None
		want_downgrade_parent = None

		def check_reverse_dependencies(existing_pkg, candidate_pkg,
			replacement_parent=None):
			"""
			Check if candidate_pkg satisfies all of existing_pkg's non-
			slot operator parents.
			"""
			built_slot_operator_parents = set()
			for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
				if atom.slot_operator_built:
					built_slot_operator_parents.add(parent)

			for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
				if isinstance(parent, Package):
					if parent in built_slot_operator_parents:
						# This parent may need to be rebuilt, so its
						# dependencies aren't necessarily relevant.
						continue

					if replacement_parent is not None and \
						(replacement_parent.slot_atom == parent.slot_atom
						or replacement_parent.cpv == parent.cpv):
						# This parent is irrelevant because we intend to
						# replace it with replacement_parent.
						continue

					if any(pkg is not parent and
						(pkg.slot_atom == parent.slot_atom or
						pkg.cpv == parent.cpv) for pkg in
						self._dynamic_config._package_tracker.match(
						parent.root, Atom(parent.cp))):
						# This parent may need to be eliminated due to a
						# slot conflict, so its dependencies aren't
						# necessarily relevant.
						continue

				atom_set = InternalPackageSet(initial_atoms=(atom,),
					allow_repo=True)
				if not atom_set.findAtomForPackage(candidate_pkg,
					modified_use=self._pkg_use_enabled(candidate_pkg)):
					return False
			return True


		for replacement_parent in self._iter_similar_available(dep.parent,
			dep.parent.slot_atom, autounmask_level=autounmask_level):

			if replacement_parent < dep.parent:
				if want_downgrade_parent is None:
					want_downgrade_parent = self._downgrade_probe(
						dep.parent)
				if not want_downgrade_parent:
					continue

			if not check_reverse_dependencies(dep.parent, replacement_parent):
				continue

			# Evaluated lazily, only when the first non-insignificant
			# candidate needs to be checked.
			selected_atoms = None

			try:
				atoms = self._flatten_atoms(replacement_parent,
					self._pkg_use_enabled(replacement_parent))
			except InvalidDependString:
				continue

			# List of list of child,atom pairs for each atom.
			replacement_candidates = []
			# Set of all packages all atoms can agree on.
			all_candidate_pkgs = None

			for atom in atoms:
				if atom.blocker or \
					atom.cp != dep.atom.cp:
					continue

				# Discard USE deps, we're only searching for an approximate
				# pattern, and dealing with USE states is too complex for
				# this purpose.
				unevaluated_atom = atom.unevaluated_atom
				atom = atom.without_use

				if replacement_parent.built and \
					portage.dep._match_slot(atom, dep.child):
					# Our selected replacement_parent appears to be built
					# for the existing child selection. So, discard this
					# parent and search for another.
					break

				candidate_pkg_atoms = []
				candidate_pkgs = []
				for pkg in self._iter_similar_available(
					dep.child, atom):
					if pkg.slot == dep.child.slot and \
						pkg.sub_slot == dep.child.sub_slot:
						# If slot/sub-slot is identical, then there's
						# no point in updating.
						continue
					if new_child_slot:
						if pkg.slot == dep.child.slot:
							continue
						if pkg < dep.child:
							# the new slot only matters if the
							# package version is higher
							continue
					else:
						if pkg.slot != dep.child.slot:
							continue
						if pkg < dep.child:
							if want_downgrade is None:
								want_downgrade = self._downgrade_probe(dep.child)
							# be careful not to trigger a rebuild when
							# the only version available with a
							# different slot_operator is an older version
							if not want_downgrade:
								continue
						if pkg.version == dep.child.version and not dep.child.built:
							continue

					insignificant = False
					if not slot_conflict and \
						selective and \
						dep.parent.installed and \
						dep.child.installed and \
						dep.parent >= replacement_parent and \
						dep.child.cpv == pkg.cpv:
						# This can happen if the child's sub-slot changed
						# without a revision bump. The sub-slot change is
						# considered insignificant until one of its parent
						# packages needs to be rebuilt (which may trigger a
						# slot conflict).
						insignificant = True

					if not insignificant:
						# Evaluate USE conditionals and || deps, in order
						# to see if this atom is really desirable, since
						# otherwise we may trigger an undesirable rebuild
						# as in bug #460304.
						if selected_atoms is None:
							selected_atoms = self._select_atoms_probe(
								dep.child.root, replacement_parent)
						if unevaluated_atom not in selected_atoms:
							continue

					if not insignificant and \
						check_reverse_dependencies(dep.child, pkg,
							replacement_parent=replacement_parent):

						candidate_pkg_atoms.append((pkg, unevaluated_atom))
						candidate_pkgs.append(pkg)
				replacement_candidates.append(candidate_pkg_atoms)
				if all_candidate_pkgs is None:
					all_candidate_pkgs = set(candidate_pkgs)
				else:
					all_candidate_pkgs.intersection_update(candidate_pkgs)

			if not all_candidate_pkgs:
				# If the atoms that connect parent and child can't agree on
				# any replacement child, we can't do anything.
				continue

			# Now select one of the pkgs as replacement. This is as easy as
			# selecting the highest version.
			# The more complicated part is to choose an atom for the
			# new Dependency object. Choose the one which ranked the selected
			# parent highest.
			selected = None
			for candidate_pkg_atoms in replacement_candidates:
				for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
					if pkg not in all_candidate_pkgs:
						continue
					if selected is None or \
						selected[0] < pkg or \
						(selected[0] is pkg and i < selected[2]):
						selected = (pkg, atom, i)

			if debug:
				msg = []
				msg.append("")
				msg.append("")
				msg.append("slot_operator_update_probe:")
				msg.append(" existing child package: %s" % dep.child)
				msg.append(" existing parent package: %s" % dep.parent)
				msg.append(" new child package: %s" % selected[0])
				msg.append(" new parent package: %s" % replacement_parent)
				msg.append("")
				writemsg_level("\n".join(msg),
					noiselevel=-1, level=logging.DEBUG)

			return Dependency(parent=replacement_parent,
				child=selected[0], atom=selected[1])

		if debug:
			msg = []
			msg.append("")
			msg.append("")
			msg.append("slot_operator_update_probe:")
			msg.append(" existing child package: %s" % dep.child)
			msg.append(" existing parent package: %s" % dep.parent)
			msg.append(" new child package: %s" % None)
			msg.append(" new parent package: %s" % None)
			msg.append("")
			writemsg_level("\n".join(msg),
				noiselevel=-1, level=logging.DEBUG)

		return None
1871 - def _slot_operator_unsatisfied_probe(self, dep):
1872 1873 if dep.parent.installed and \ 1874 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent, 1875 modified_use=self._pkg_use_enabled(dep.parent)): 1876 return False 1877 1878 debug = "--debug" in self._frozen_config.myopts 1879 1880 for replacement_parent in self._iter_similar_available(dep.parent, 1881 dep.parent.slot_atom): 1882 1883 for atom in replacement_parent.validated_atoms: 1884 if not atom.slot_operator == "=" or \ 1885 atom.blocker or \ 1886 atom.cp != dep.atom.cp: 1887 continue 1888 1889 # Discard USE deps, we're only searching for an approximate 1890 # pattern, and dealing with USE states is too complex for 1891 # this purpose. 1892 atom = atom.without_use 1893 1894 pkg, existing_node = self._select_package(dep.root, atom, 1895 onlydeps=dep.onlydeps) 1896 1897 if pkg is not None: 1898 1899 if debug: 1900 msg = [] 1901 msg.append("") 1902 msg.append("") 1903 msg.append("slot_operator_unsatisfied_probe:") 1904 msg.append(" existing parent package: %s" % dep.parent) 1905 msg.append(" existing parent atom: %s" % dep.atom) 1906 msg.append(" new parent package: %s" % replacement_parent) 1907 msg.append(" new child package: %s" % pkg) 1908 msg.append("") 1909 writemsg_level("\n".join(msg), 1910 noiselevel=-1, level=logging.DEBUG) 1911 1912 return True 1913 1914 if debug: 1915 msg = [] 1916 msg.append("") 1917 msg.append("") 1918 msg.append("slot_operator_unsatisfied_probe:") 1919 msg.append(" existing parent package: %s" % dep.parent) 1920 msg.append(" existing parent atom: %s" % dep.atom) 1921 msg.append(" new parent package: %s" % None) 1922 msg.append(" new child package: %s" % None) 1923 msg.append("") 1924 writemsg_level("\n".join(msg), 1925 noiselevel=-1, level=logging.DEBUG) 1926 1927 return False
1928
1929 - def _slot_operator_unsatisfied_backtrack(self, dep):
1930 1931 parent = dep.parent 1932 1933 if "--debug" in self._frozen_config.myopts: 1934 msg = [] 1935 msg.append("") 1936 msg.append("") 1937 msg.append("backtracking due to unsatisfied " 1938 "built slot-operator dep:") 1939 msg.append(" parent package: %s" % parent) 1940 msg.append(" atom: %s" % dep.atom) 1941 msg.append("") 1942 writemsg_level("\n".join(msg), 1943 noiselevel=-1, level=logging.DEBUG) 1944 1945 backtrack_infos = self._dynamic_config._backtrack_infos 1946 config = backtrack_infos.setdefault("config", {}) 1947 1948 # mask unwanted binary packages if necessary 1949 masks = {} 1950 if not parent.installed: 1951 masks.setdefault(parent, {})["slot_operator_mask_built"] = None 1952 if masks: 1953 config.setdefault("slot_operator_mask_built", {}).update(masks) 1954 1955 # trigger replacement of installed packages if necessary 1956 reinstalls = set() 1957 if parent.installed: 1958 replacement_atom = self._replace_installed_atom(parent) 1959 if replacement_atom is not None: 1960 reinstalls.add((parent.root, replacement_atom)) 1961 if reinstalls: 1962 config.setdefault("slot_operator_replace_installed", 1963 set()).update(reinstalls) 1964 1965 self._dynamic_config._need_restart = True
1966
1967 - def _downgrade_probe(self, pkg):
1968 """ 1969 Detect cases where a downgrade of the given package is considered 1970 desirable due to the current version being masked or unavailable. 1971 """ 1972 available_pkg = None 1973 for available_pkg in self._iter_similar_available(pkg, 1974 pkg.slot_atom): 1975 if available_pkg >= pkg: