Package _emerge :: Module depgraph
[hide private]

Source Code for Module _emerge.depgraph

   1  # Copyright 1999-2013 Gentoo Foundation 
   2  # Distributed under the terms of the GNU General Public License v2 
   3   
   4  from __future__ import print_function, unicode_literals 
   5   
   6  import errno 
   7  import io 
   8  import logging 
   9  import stat 
  10  import sys 
  11  import textwrap 
  12  import warnings 
  13  from collections import deque 
  14  from itertools import chain 
  15   
  16  import portage 
  17  from portage import os, OrderedDict 
  18  from portage import _unicode_decode, _unicode_encode, _encodings 
  19  from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS 
  20  from portage.dbapi import dbapi 
  21  from portage.dbapi.dep_expand import dep_expand 
  22  from portage.dbapi._similar_name_search import similar_name_search 
  23  from portage.dep import Atom, best_match_to_list, extract_affecting_use, \ 
  24          check_required_use, human_readable_required_use, match_from_list, \ 
  25          _repo_separator 
  26  from portage.dep._slot_operator import ignore_built_slot_operator_deps 
  27  from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \ 
  28          _get_eapi_attrs 
  29  from portage.exception import (InvalidAtom, InvalidData, InvalidDependString, 
  30          PackageNotFound, PortageException) 
  31  from portage.output import colorize, create_color_func, \ 
  32          darkgreen, green 
  33  bad = create_color_func("BAD") 
  34  from portage.package.ebuild.config import _get_feature_flags 
  35  from portage.package.ebuild.getmaskingstatus import \ 
  36          _getmaskingstatus, _MaskReason 
  37  from portage._sets import SETPREFIX 
  38  from portage._sets.base import InternalPackageSet 
  39  from portage.util import ConfigProtect, shlex_split, new_protect_filename 
  40  from portage.util import cmp_sort_key, writemsg, writemsg_stdout 
  41  from portage.util import ensure_dirs 
  42  from portage.util import writemsg_level, write_atomic 
  43  from portage.util.digraph import digraph 
  44  from portage.util._async.TaskScheduler import TaskScheduler 
  45  from portage.util._eventloop.EventLoop import EventLoop 
  46  from portage.util._eventloop.global_event_loop import global_event_loop 
  47  from portage.versions import catpkgsplit 
  48   
  49  from _emerge.AtomArg import AtomArg 
  50  from _emerge.Blocker import Blocker 
  51  from _emerge.BlockerCache import BlockerCache 
  52  from _emerge.BlockerDepPriority import BlockerDepPriority 
  53  from .chk_updated_cfg_files import chk_updated_cfg_files 
  54  from _emerge.countdown import countdown 
  55  from _emerge.create_world_atom import create_world_atom 
  56  from _emerge.Dependency import Dependency 
  57  from _emerge.DependencyArg import DependencyArg 
  58  from _emerge.DepPriority import DepPriority 
  59  from _emerge.DepPriorityNormalRange import DepPriorityNormalRange 
  60  from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange 
  61  from _emerge.EbuildMetadataPhase import EbuildMetadataPhase 
  62  from _emerge.FakeVartree import FakeVartree 
  63  from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps 
  64  from _emerge.is_valid_package_atom import insert_category_into_atom, \ 
  65          is_valid_package_atom 
  66  from _emerge.Package import Package 
  67  from _emerge.PackageArg import PackageArg 
  68  from _emerge.PackageVirtualDbapi import PackageVirtualDbapi 
  69  from _emerge.RootConfig import RootConfig 
  70  from _emerge.search import search 
  71  from _emerge.SetArg import SetArg 
  72  from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice 
  73  from _emerge.UnmergeDepPriority import UnmergeDepPriority 
  74  from _emerge.UseFlagDisplay import pkg_use_display 
  75  from _emerge.userquery import userquery 
  76   
  77  from _emerge.resolver.backtracking import Backtracker, BacktrackParameter 
  78  from _emerge.resolver.slot_collision import slot_conflict_handler 
  79  from _emerge.resolver.circular_dependency import circular_dependency_handler 
  80  from _emerge.resolver.output import Display 
  81   
# Python 2/3 compatibility aliases. On Python 3 the removed py2 builtins
# are re-pointed at their py3 equivalents so the rest of the module can
# use them unconditionally.
if sys.hexversion >= 0x3000000:
	basestring = str
	long = int
	_unicode = str
else:
	# On Python 2, unicode is a builtin.
	_unicode = unicode
  88   
89 -class _scheduler_graph_config(object):
90 - def __init__(self, trees, pkg_cache, graph, mergelist):
91 self.trees = trees 92 self.pkg_cache = pkg_cache 93 self.graph = graph 94 self.mergelist = mergelist
95
def _wildcard_set(atoms):
	"""Build an InternalPackageSet (wildcards allowed) from atom strings.

	A string that does not parse as an atom on its own is retried with a
	"*/" category prefix before being added to the set.
	"""
	pkgs = InternalPackageSet(allow_wildcard=True)
	for token in atoms:
		try:
			atom = Atom(token, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			# Assume a bare package name; prepend a wildcard category.
			atom = Atom("*/" + token, allow_wildcard=True, allow_repo=False)
		pkgs.add(atom)
	return pkgs
105
class _frozen_depgraph_config(object):
	"""Immutable (per emerge invocation) depgraph configuration: trees,
	settings, and the wildcard atom sets derived from command-line
	options. Shared across backtracking depgraph instances.
	"""

	def __init__(self, settings, trees, myopts, spinner):
		self.settings = settings
		self.target_root = settings["EROOT"]
		self.myopts = myopts
		# Ebuild-debug flag, mirrored from PORTAGE_DEBUG.
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# All Package instances
		self._pkg_cache = {}
		self._highest_license_masked = {}
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			# Substitute a FakeVartree so the graph can model the vdb
			# state after pending merges, without touching the real vdb.
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot],
					dynamic_deps=dynamic_deps,
					ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)

		self._required_set_names = set(["world"])

		# Each of these options takes a list of whitespace-separated
		# atoms; join then re-split to flatten multiple occurrences.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
164
class _depgraph_sets(object):
	"""Per-root bookkeeping for package sets pulled into the graph."""

	def __init__(self):
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self.atoms = InternalPackageSet(allow_repo=True)
		self.atom_arg_map = {}
		# contains all sets added to the graph; non-set atoms given as
		# arguments are collected under the '__non_set_args__' key
		self.sets = {
			'__non_set_args__': InternalPackageSet(allow_repo=True),
		}
175
class _rebuild_config(object):
	"""Tracks which packages must be rebuilt or reinstalled to honor the
	--rebuild-if-new-rev / --rebuild-if-new-ver / --rebuild-if-unbuilt
	options, using a digraph of build-time dependency edges.
	"""

	def __init__(self, frozen_config, backtrack_parameters):
		# Build-time dependency graph: dep_pkg -> parent edges.
		self._graph = digraph()
		self._frozen_config = frozen_config
		# Copy so this instance can grow the lists without mutating the
		# shared BacktrackParameter state.
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any rebuild-trigger option is active.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)

	def add(self, dep_pkg, dep):
		"""Record the build-time edge dep_pkg -> dep.collapsed_parent,
		unless either end matches --rebuild-exclude / --rebuild-ignore."""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		# Only built parents with a build-time dependency on a real
		# Package are rebuild candidates.
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and priority.buildtime and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)

	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
			return False

		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
			return True

		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
				return False

		return True

	def _trigger_rebuild(self, parent, build_deps):
		"""Decide whether ``parent`` must be rebuilt or reinstalled given
		its build deps (a slot_atom -> dep_pkg mapping). Returns True
		when a rebuild/reinstall was recorded for ``parent``.
		"""
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
			# Already scheduled for rebuild.
			return False
		trees = self._frozen_config.trees
		reinstall = False
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
				return True
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):

				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				#    consistent.
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Look for a same-version (ignoring revision) binary
					# of dep_pkg on the parent's binhost.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
							if uri == dep_uri:
								break
				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					#    built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
					return True
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					try:
						bin_build_time, = bindb.aux_get(parent.cpv,
							["BUILD_TIME"])
					except KeyError:
						continue
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local package
						#    is not up to date. Force reinstall.
						reinstall = True
		if reinstall:
			self.reinstall_list.add(root_slot)
		return reinstall

	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.
		"""
		need_restart = False
		graph = self._graph
		build_deps = {}

		leaf_nodes = deque(graph.leaf_nodes())

		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
		while graph:
			if not leaf_nodes:
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])

			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
				continue
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			graph.remove(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				if parent == node:
					# Ignore a direct cycle.
					continue
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
				need_restart = True

		return need_restart
336 337
class _dynamic_depgraph_config(object):
	"""Mutable per-calculation depgraph state: the dependency digraph,
	blocker graphs, per-root fake dbapi views, and all backtracking
	bookkeeping. A fresh instance is created for each backtracking run,
	while the _frozen_depgraph_config is shared.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		self.mydbapi = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		# Contains the args created by select_files.
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		self.sets = {}
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This is used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		# This should be ordered such that the backtracker will
		# attempt to solve conflicts which occurred earlier first,
		# since an earlier conflict can be the cause of a conflict
		# which occurs later.
		self._slot_collision_info = OrderedDict()
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}

		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}

		# State carried over from previous backtracking runs.
		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
		self._prune_rebuilds = backtrack_parameters.prune_rebuilds
		self._need_restart = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._buildpkgonly_deps_unsatisfied = False
		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._success_without_autounmask = False
		self._traverse_ignored_deps = False
		self._complete_mode = False
		self._slot_operator_deps = {}

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			self._slot_pkg_map[myroot] = {}
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)

			self.mydbapi[myroot] = fakedb
			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph
			self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
			def filtered_tree():
				pass
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]
			self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg

			dbs = []
			#               (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
			else:
				if "--usepkgonly" not in depgraph._frozen_config.myopts:
					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
					db_keys = list(portdb._aux_cache_keys)
					dbs.append((portdb, "ebuild", False, False, db_keys))

				if "--usepkg" in depgraph._frozen_config.myopts:
					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
					db_keys = list(bindb._aux_cache_keys)
					dbs.append((bindb, "binary", True, False, db_keys))

			# The installed-package db is always consulted last.
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
511 -class depgraph(object):
512 513 pkg_tree_map = RootConfig.pkg_tree_map 514
515 - def __init__(self, settings, trees, myopts, myparams, spinner, 516 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
517 if frozen_config is None: 518 frozen_config = _frozen_depgraph_config(settings, trees, 519 myopts, spinner) 520 self._frozen_config = frozen_config 521 self._dynamic_config = _dynamic_depgraph_config(self, myparams, 522 allow_backtracking, backtrack_parameters) 523 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters) 524 525 self._select_atoms = self._select_atoms_highest_available 526 self._select_package = self._select_pkg_highest_available 527 528 self._event_loop = (portage._internal_caller and 529 global_event_loop() or EventLoop(main=False))
530
	def _load_vdb(self):
		"""
		Load installed package metadata if appropriate. This used to be called
		from the constructor, but that wasn't very nice since this procedure
		is slow and it generates spinner output. So, now it's called on-demand
		by various methods when necessary.
		"""

		if self._dynamic_config._vdb_loaded:
			return

		for myroot in self._frozen_config.trees:

			dynamic_deps = self._dynamic_config.myparams.get(
				"dynamic_deps", "y") != "n"
			preload_installed_pkgs = \
				"--nodeps" not in self._frozen_config.myopts

			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
			if not fake_vartree.dbapi:
				# This needs to be called for the first depgraph, but not for
				# backtracking depgraphs that share the same frozen_config.
				fake_vartree.sync()

				# FakeVartree.sync() populates virtuals, and we want
				# self.pkgsettings to have them populated too.
				self._frozen_config.pkgsettings[myroot] = \
					portage.config(clone=fake_vartree.settings)

			if preload_installed_pkgs:
				vardb = fake_vartree.dbapi
				fakedb = self._dynamic_config._graph_trees[
					myroot]["vartree"].dbapi

				if not dynamic_deps:
					# Static deps: a plain copy of the vdb suffices.
					for pkg in vardb:
						fakedb.cpv_inject(pkg)
				else:
					# Dynamic deps: regenerate metadata for installed
					# packages in parallel, bounded by --jobs/--load-average.
					max_jobs = self._frozen_config.myopts.get("--jobs")
					max_load = self._frozen_config.myopts.get("--load-average")
					scheduler = TaskScheduler(
						self._dynamic_deps_preload(fake_vartree, fakedb),
						max_jobs=max_jobs,
						max_load=max_load,
						event_loop=fake_vartree._portdb._event_loop)
					scheduler.start()
					scheduler.wait()

		self._dynamic_config._vdb_loaded = True
580
	def _dynamic_deps_preload(self, fake_vartree, fakedb):
		"""
		Generator consumed by the TaskScheduler in _load_vdb: injects each
		installed package into ``fakedb`` and yields an EbuildMetadataPhase
		task for every package whose metadata is not available from the
		ebuild cache. Packages with no matching ebuild or with valid cached
		metadata are preloaded synchronously instead.
		"""
		portdb = fake_vartree._portdb
		for pkg in fake_vartree.dbapi:
			self._spinner_update()
			fakedb.cpv_inject(pkg)
			ebuild_path, repo_path = \
				portdb.findname2(pkg.cpv, myrepo=pkg.repo)
			if ebuild_path is None:
				# No matching ebuild; fall back to the installed metadata.
				fake_vartree.dynamic_deps_preload(pkg, None)
				continue
			metadata, ebuild_hash = portdb._pull_valid_cache(
				pkg.cpv, ebuild_path, repo_path)
			if metadata is not None:
				fake_vartree.dynamic_deps_preload(pkg, metadata)
			else:
				# Cache miss: schedule asynchronous metadata regeneration;
				# the exit listener feeds the result back to fake_vartree.
				proc = EbuildMetadataPhase(cpv=pkg.cpv,
					ebuild_hash=ebuild_hash,
					portdb=portdb, repo_path=repo_path,
					settings=portdb.doebuild_settings)
				proc.addExitListener(
					self._dynamic_deps_proc_exit(pkg, fake_vartree))
				yield proc
603
604 - class _dynamic_deps_proc_exit(object):
605 606 __slots__ = ('_pkg', '_fake_vartree') 607
608 - def __init__(self, pkg, fake_vartree):
609 self._pkg = pkg 610 self._fake_vartree = fake_vartree
611
612 - def __call__(self, proc):
613 metadata = None 614 if proc.returncode == os.EX_OK: 615 metadata = proc.metadata 616 self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
617
618 - def _spinner_update(self):
619 if self._frozen_config.spinner: 620 self._frozen_config.spinner.update()
621
	def _show_ignored_binaries(self):
		"""
		Show binaries that have been ignored because their USE didn't
		match the user's config.
		"""
		# Stay silent in --quiet mode or when the user set
		# binpkg_respect_use explicitly.
		if not self._dynamic_config.ignored_binaries \
			or '--quiet' in self._frozen_config.myopts \
			or self._dynamic_config.myparams.get(
			"binpkg_respect_use") in ("y", "n"):
			return

		# Prune entries that turned out not to matter. Iterate over a
		# copy since entries are popped during iteration.
		for pkg in list(self._dynamic_config.ignored_binaries):

			selected_pkg = self._dynamic_config.mydbapi[pkg.root
				].match_pkgs(pkg.slot_atom)

			if not selected_pkg:
				continue

			selected_pkg = selected_pkg[-1]
			if selected_pkg > pkg:
				# A newer package fills the slot anyway.
				self._dynamic_config.ignored_binaries.pop(pkg)
				continue

			if selected_pkg.installed and \
				selected_pkg.cpv == pkg.cpv and \
				selected_pkg.build_time == pkg.build_time:
				# We don't care about ignored binaries when an
				# identical installed instance is selected to
				# fill the slot.
				self._dynamic_config.ignored_binaries.pop(pkg)
				continue

		if not self._dynamic_config.ignored_binaries:
			return

		self._show_merge_list()

		writemsg("\n!!! The following binary packages have been ignored " + \
			"due to non matching USE:\n\n", noiselevel=-1)

		for pkg, flags in self._dynamic_config.ignored_binaries.items():
			flag_display = []
			for flag in sorted(flags):
				# Show flags in the state the user's config requires,
				# prefixing disabled ones with "-".
				if flag not in pkg.use.enabled:
					flag = "-" + flag
				flag_display.append(flag)
			flag_display = " ".join(flag_display)
			# The user can paste this line into package.use
			writemsg("    =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)

		msg = [
			"",
			"NOTE: The --binpkg-respect-use=n option will prevent emerge",
			"      from ignoring these binary packages if possible.",
			"      Using --binpkg-respect-use=y will silence this warning."
		]

		for line in msg:
			if line:
				line = colorize("INFORM", line)
			writemsg(line + "\n", noiselevel=-1)
687
	def _get_missed_updates(self):
		"""
		Collect available updates that were masked at runtime (e.g. by
		backtracking), keyed by (root, slot_atom). Returns a dict mapping
		that key to (pkg, mask_type, parent_atoms).
		"""

		# In order to minimize noise, show only the highest
		# missed update from each SLOT.
		missed_updates = {}
		for pkg, mask_reasons in \
			self._dynamic_config._runtime_pkg_mask.items():
			if pkg.installed:
				# Exclude installed here since we only
				# want to show available updates.
				continue
			chosen_pkg = self._dynamic_config.mydbapi[pkg.root
				].match_pkgs(pkg.slot_atom)
			if not chosen_pkg or chosen_pkg[-1] >= pkg:
				# The slot is empty or already filled by an equal or
				# newer package, so nothing was missed.
				continue
			k = (pkg.root, pkg.slot_atom)
			if k in missed_updates:
				other_pkg, mask_type, parent_atoms = missed_updates[k]
				if other_pkg > pkg:
					# Keep the higher of the two missed candidates.
					continue
			# Record the first mask reason that has parent atoms.
			for mask_type, parent_atoms in mask_reasons.items():
				if not parent_atoms:
					continue
				missed_updates[k] = (pkg, mask_type, parent_atoms)
				break

		return missed_updates
715
716 - def _show_missed_update(self):
717 718 missed_updates = self._get_missed_updates() 719 720 if not missed_updates: 721 return 722 723 missed_update_types = {} 724 for pkg, mask_type, parent_atoms in missed_updates.values(): 725 missed_update_types.setdefault(mask_type, 726 []).append((pkg, parent_atoms)) 727 728 if '--quiet' in self._frozen_config.myopts and \ 729 '--debug' not in self._frozen_config.myopts: 730 missed_update_types.pop("slot conflict", None) 731 missed_update_types.pop("missing dependency", None) 732 733 self._show_missed_update_slot_conflicts( 734 missed_update_types.get("slot conflict")) 735 736 self._show_missed_update_unsatisfied_dep( 737 missed_update_types.get("missing dependency"))
738
	def _show_missed_update_unsatisfied_dep(self, missed_updates):
		"""
		Display updates skipped due to unsatisfied dependencies.
		``missed_updates`` is a list of (pkg, parent_atoms) where each
		parent_atoms entry is a (parent, root, atom) tuple.
		"""

		if not missed_updates:
			return

		self._show_merge_list()
		backtrack_masked = []

		for pkg, parent_atoms in missed_updates:

			try:
				# Probe first: _show_unsatisfied_dep raises
				# self._backtrack_mask in check_backtrack mode when the
				# dep was masked by backtracking.
				for parent, root, atom in parent_atoms:
					self._show_unsatisfied_dep(root, atom, myparent=parent,
						check_backtrack=True)
			except self._backtrack_mask:
				# This is displayed below in abbreviated form.
				backtrack_masked.append((pkg, parent_atoms))
				continue

			writemsg("\n!!! The following update has been skipped " + \
				"due to unsatisfied dependencies:\n\n", noiselevel=-1)

			writemsg(str(pkg.slot_atom), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)

			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent)
				writemsg("\n", noiselevel=-1)

		if backtrack_masked:
			# These are shown in abbreviated form, in order to avoid terminal
			# flooding from mask messages as reported in bug #285832.
			writemsg("\n!!! The following update(s) have been skipped " + \
				"due to unsatisfied dependencies\n" + \
				"!!! triggered by backtracking:\n\n", noiselevel=-1)
			for pkg, parent_atoms in backtrack_masked:
				writemsg(str(pkg.slot_atom), noiselevel=-1)
				if pkg.root_config.settings["ROOT"] != "/":
					writemsg(" for %s" % (pkg.root,), noiselevel=-1)
				writemsg("\n", noiselevel=-1)
781
	def _show_missed_update_slot_conflicts(self, missed_updates):
		"""
		Display updates skipped due to slot conflicts. ``missed_updates``
		is a list of (pkg, parent_atoms) where each parent_atoms entry is
		a (parent, atom) pair — note the 2-tuple shape, unlike the
		3-tuples used by _show_missed_update_unsatisfied_dep.
		"""

		if not missed_updates:
			return

		self._show_merge_list()
		msg = []
		msg.append("\nWARNING: One or more updates have been " + \
			"skipped due to a dependency conflict:\n\n")

		indent = "  "
		for pkg, parent_atoms in missed_updates:
			msg.append(str(pkg.slot_atom))
			if pkg.root_config.settings["ROOT"] != "/":
				msg.append(" for %s" % (pkg.root,))
			msg.append("\n\n")

			for parent, atom in parent_atoms:
				msg.append(indent)
				msg.append(str(pkg))

				msg.append(" conflicts with\n")
				msg.append(2*indent)
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
				else:
					# Display the specific atom from SetArg or
					# Package types.
					msg.append("%s required by %s" % (atom, parent))
				msg.append("\n")
			msg.append("\n")

		writemsg("".join(msg), noiselevel=-1)
818
820 """Show an informational message advising the user to mask one of the 821 the packages. In some cases it may be possible to resolve this 822 automatically, but support for backtracking (removal nodes that have 823 already been selected) will be required in order to handle all possible 824 cases. 825 """ 826 827 if not self._dynamic_config._slot_collision_info: 828 return 829 830 self._show_merge_list() 831 832 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self) 833 handler = self._dynamic_config._slot_conflict_handler 834 835 conflict = handler.get_conflict() 836 writemsg(conflict, noiselevel=-1) 837 838 explanation = handler.get_explanation() 839 if explanation: 840 writemsg(explanation, noiselevel=-1) 841 return 842 843 if "--quiet" in self._frozen_config.myopts: 844 return 845 846 msg = [] 847 msg.append("It may be possible to solve this problem ") 848 msg.append("by using package.mask to prevent one of ") 849 msg.append("those packages from being selected. ") 850 msg.append("However, it is also possible that conflicting ") 851 msg.append("dependencies exist such that they are impossible to ") 852 msg.append("satisfy simultaneously. 
If such a conflict exists in ") 853 msg.append("the dependencies of two different packages, then those ") 854 msg.append("packages can not be installed simultaneously.") 855 backtrack_opt = self._frozen_config.myopts.get('--backtrack') 856 if not self._dynamic_config._allow_backtracking and \ 857 (backtrack_opt is None or \ 858 (backtrack_opt > 0 and backtrack_opt < 30)): 859 msg.append(" You may want to try a larger value of the ") 860 msg.append("--backtrack option, such as --backtrack=30, ") 861 msg.append("in order to see if that will solve this conflict ") 862 msg.append("automatically.") 863 864 for line in textwrap.wrap(''.join(msg), 70): 865 writemsg(line + '\n', noiselevel=-1) 866 writemsg('\n', noiselevel=-1) 867 868 msg = [] 869 msg.append("For more information, see MASKED PACKAGES ") 870 msg.append("section in the emerge man page or refer ") 871 msg.append("to the Gentoo Handbook.") 872 for line in textwrap.wrap(''.join(msg), 70): 873 writemsg(line + '\n', noiselevel=-1) 874 writemsg('\n', noiselevel=-1)
875
876 - def _process_slot_conflicts(self):
877 """ 878 If there are any slot conflicts and backtracking is enabled, 879 _complete_graph should complete the graph before this method 880 is called, so that all relevant reverse dependencies are 881 available for use in backtracking decisions. 882 """ 883 for (slot_atom, root), slot_nodes in \ 884 self._dynamic_config._slot_collision_info.items(): 885 self._process_slot_conflict(root, slot_atom, slot_nodes)
886
	def _process_slot_conflict(self, root, slot_atom, slot_nodes):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.

		@param root: ROOT value of the conflict key
		@param slot_atom: the slot atom shared by the conflicting packages
		@param slot_nodes: the packages pulled into this slot
		"""

		debug = "--debug" in self._frozen_config.myopts

		# Union of every parent atom that pulled in any of the
		# conflicting packages in this slot.
		slot_parent_atoms = set()
		for pkg in slot_nodes:
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:
				continue
			slot_parent_atoms.update(parent_atoms)

		conflict_pkgs = []
		conflict_atoms = {}
		for pkg in slot_nodes:

			# Detection only: a masked package showing up here again
			# indicates a backtracking loop; report it in debug mode.
			if self._dynamic_config._allow_backtracking and \
				pkg in self._dynamic_config._runtime_pkg_mask:
				if debug:
					writemsg_level(
						"!!! backtracking loop detected: %s %s\n" % \
						(pkg,
						self._dynamic_config._runtime_pkg_mask[pkg]),
						level=logging.DEBUG, noiselevel=-1)

			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if parent_atoms is None:
				parent_atoms = set()
				self._dynamic_config._parent_atoms[pkg] = parent_atoms

			# Classify: any parent atom in the slot that does not match
			# pkg marks pkg as a conflict package, and is recorded in
			# conflict_atoms keyed by that (parent, atom) pair.
			all_match = True
			for parent_atom in slot_parent_atoms:
				if parent_atom in parent_atoms:
					continue
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				parent, atom = parent_atom
				atom_set = InternalPackageSet(
					initial_atoms=(atom,), allow_repo=True)
				if atom_set.findAtomForPackage(pkg,
					modified_use=self._pkg_use_enabled(pkg)):
					parent_atoms.add(parent_atom)
				else:
					all_match = False
					conflict_atoms.setdefault(parent_atom, set()).add(pkg)

			if not all_match:
				conflict_pkgs.append(pkg)

		# Try the slot-operator (ABI) rebuild path first; anything it
		# cannot handle falls through to plain slot-conflict backtracking.
		if conflict_pkgs and \
			self._dynamic_config._allow_backtracking and \
			not self._accept_blocker_conflicts():
			remaining = []
			for pkg in conflict_pkgs:
				if self._slot_conflict_backtrack_abi(pkg,
					slot_nodes, conflict_atoms):
					backtrack_infos = self._dynamic_config._backtrack_infos
					config = backtrack_infos.setdefault("config", {})
					config.setdefault("slot_conflict_abi", set()).add(pkg)
				else:
					remaining.append(pkg)
			if remaining:
				self._slot_confict_backtrack(root, slot_atom,
					slot_parent_atoms, remaining)
955
	def _slot_confict_backtrack(self, root, slot_atom,
		all_parents, conflict_pkgs):
		"""
		Record backtracking data for a slot conflict, masking the
		conflicting packages one at a time on subsequent backtracking
		runs, and set the need-restart flag.
		NOTE: the historical typo in this method's name ("confict") is
		preserved because callers reference it by this exact name.

		@param root: ROOT of the conflicting slot
		@param slot_atom: the contested slot atom
		@param all_parents: all (parent, atom) pairs involved in the slot
		@param conflict_pkgs: the packages to schedule for masking
		"""

		debug = "--debug" in self._frozen_config.myopts
		existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
		# In order to avoid a missed update, first mask lower versions
		# that conflict with higher versions (the backtracker visits
		# these in reverse order).
		conflict_pkgs.sort(reverse=True)
		backtrack_data = []
		for to_be_masked in conflict_pkgs:
			# For missed update messages, find out which
			# atoms matched to_be_selected that did not
			# match to_be_masked.
			parent_atoms = \
				self._dynamic_config._parent_atoms.get(to_be_masked, set())
			conflict_atoms = set(parent_atom for parent_atom in all_parents \
				if parent_atom not in parent_atoms)
			backtrack_data.append((to_be_masked, conflict_atoms))

		# Last entry (lowest version, due to the reverse sort) is the one
		# reported in the debug message below.
		to_be_masked = backtrack_data[-1][0]

		self._dynamic_config._backtrack_infos.setdefault(
			"slot conflict", []).append(backtrack_data)
		self._dynamic_config._need_restart = True
		if debug:
			msg = []
			msg.append("")
			msg.append("")
			msg.append("backtracking due to slot conflict:")
			msg.append(" first package: %s" % existing_node)
			msg.append(" package to mask: %s" % to_be_masked)
			msg.append(" slot: %s" % slot_atom)
			msg.append(" parents: %s" % ", ".join( \
				"(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
			msg.append("")
			writemsg_level("".join("%s\n" % l for l in msg),
				noiselevel=-1, level=logging.DEBUG)
994
	def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
		"""
		If one or more conflict atoms have a slot/sub-slot dep that can be resolved
		by rebuilding the parent package, then schedule the rebuild via
		backtracking, and return True. Otherwise, return False.

		@param pkg: the conflicting package under consideration
		@param slot_nodes: all packages pulled into the contested slot
		@param conflict_atoms: mapping of (parent, atom) to the set of
			slot packages that the atom failed to match
		"""

		found_update = False
		for parent_atom, conflict_pkgs in conflict_atoms.items():
			parent, atom = parent_atom
			# Only built parents with a "=" slot operator dep are
			# candidates for a resolving rebuild.
			if atom.slot_operator != "=" or not parent.built:
				continue

			if pkg not in conflict_pkgs:
				continue

			# Probe whether the parent can be rebuilt against one of the
			# non-conflicting packages in the same slot.
			for other_pkg in slot_nodes:
				if other_pkg in conflict_pkgs:
					continue

				dep = Dependency(atom=atom, child=other_pkg,
					parent=parent, root=pkg.root)

				new_dep = \
					self._slot_operator_update_probe_slot_conflict(dep)
				if new_dep is not None:
					self._slot_operator_update_backtrack(dep,
						new_dep=new_dep)
					found_update = True

		return found_update
1026
	def _slot_change_probe(self, dep):
		"""
		Check whether dep.child should be rebuilt due to a change in
		slot/sub-slot (without revbump, as in bug #456208).

		@rtype: Package or None
		@return: a matching unbuilt ebuild whose slot/sub-slot differs
			from dep.child's, or None when no rebuild is warranted.
			(Callers may treat the result as a boolean.)
		"""
		# Only applies when an unbuilt parent depends on a built child.
		if not (isinstance(dep.parent, Package) and \
			not dep.parent.built and dep.child.built):
			return None

		root_config = self._frozen_config.roots[dep.root]
		matches = []
		try:
			# Prefer the ebuild from the child's own repo, if it exists.
			matches.append(self._pkg(dep.child.cpv, "ebuild",
				root_config, myrepo=dep.child.repo))
		except PackageNotFound:
			pass

		# Pick the first non-masked, non-excluded, visible ebuild with
		# the same cpv; the for-else returns None when none qualifies.
		for unbuilt_child in chain(matches,
			self._iter_match_pkgs(root_config, "ebuild",
			Atom("=%s" % (dep.child.cpv,)))):
			if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
				continue
			if self._frozen_config.excluded_pkgs.findAtomForPackage(
				unbuilt_child,
				modified_use=self._pkg_use_enabled(unbuilt_child)):
				continue
			if not self._pkg_visibility_check(unbuilt_child):
				continue
			break
		else:
			return None

		if unbuilt_child.slot == dep.child.slot and \
			unbuilt_child.sub_slot == dep.child.sub_slot:
			# Identical slot/sub-slot: nothing changed, no rebuild needed.
			return None

		return unbuilt_child
1065
	def _slot_change_backtrack(self, dep, new_child_slot):
		"""
		Record backtracking data so that dep.child is rebuilt (or, if
		installed, replaced) against the new slot/sub-slot found by
		_slot_change_probe, and set the need-restart flag.

		@param dep: the dependency whose child changed slot/sub-slot
		@param new_child_slot: the replacement (unbuilt) child package
		"""
		child = dep.child
		if "--debug" in self._frozen_config.myopts:
			msg = []
			msg.append("")
			msg.append("")
			msg.append("backtracking due to slot/sub-slot change:")
			msg.append(" child package: %s" % child)
			msg.append(" child slot: %s/%s" %
				(child.slot, child.sub_slot))
			msg.append(" new child: %s" % new_child_slot)
			msg.append(" new child slot: %s/%s" %
				(new_child_slot.slot, new_child_slot.sub_slot))
			msg.append(" parent package: %s" % dep.parent)
			msg.append(" atom: %s" % dep.atom)
			msg.append("")
			writemsg_level("\n".join(msg),
				noiselevel=-1, level=logging.DEBUG)
		backtrack_infos = self._dynamic_config._backtrack_infos
		config = backtrack_infos.setdefault("config", {})

		# mask unwanted binary packages if necessary
		masks = {}
		if not child.installed:
			masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
		if masks:
			config.setdefault("slot_operator_mask_built", {}).update(masks)

		# trigger replacement of installed packages if necessary
		reinstalls = set()
		if child.installed:
			replacement_atom = self._replace_installed_atom(child)
			if replacement_atom is not None:
				reinstalls.add((child.root, replacement_atom))
		if reinstalls:
			config.setdefault("slot_operator_replace_installed",
				set()).update(reinstalls)

		self._dynamic_config._need_restart = True
1105
	def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
		new_dep=None):
		"""
		Record backtracking data that forces a rebuild/reinstall of
		dep.parent (and possibly dep.child) so that a missed slot-operator
		update can be applied on the next backtracking run, and set the
		need-restart flag.

		@param dep: the slot-operator dependency that missed an update
		@param new_child_slot: replacement child in a new slot, if the
			update involves a slot change (in that case the child itself
			is not masked or replaced)
		@param new_dep: replacement dependency from a probe, whose parent
			supplies the reinstall atom for an installed dep.parent
		"""
		if new_child_slot is None:
			child = dep.child
		else:
			child = new_child_slot
		if "--debug" in self._frozen_config.myopts:
			msg = []
			msg.append("")
			msg.append("")
			msg.append("backtracking due to missed slot abi update:")
			msg.append(" child package: %s" % child)
			if new_child_slot is not None:
				msg.append(" new child slot package: %s" % new_child_slot)
			msg.append(" parent package: %s" % dep.parent)
			if new_dep is not None:
				msg.append(" new parent pkg: %s" % new_dep.parent)
			msg.append(" atom: %s" % dep.atom)
			msg.append("")
			writemsg_level("\n".join(msg),
				noiselevel=-1, level=logging.DEBUG)
		backtrack_infos = self._dynamic_config._backtrack_infos
		config = backtrack_infos.setdefault("config", {})

		# mask unwanted binary packages if necessary
		abi_masks = {}
		if new_child_slot is None:
			if not child.installed:
				abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
		if not dep.parent.installed:
			abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
		if abi_masks:
			config.setdefault("slot_operator_mask_built", {}).update(abi_masks)

		# trigger replacement of installed packages if necessary
		abi_reinstalls = set()
		if dep.parent.installed:
			if new_dep is not None:
				replacement_atom = new_dep.parent.slot_atom
			else:
				replacement_atom = self._replace_installed_atom(dep.parent)
			if replacement_atom is not None:
				abi_reinstalls.add((dep.parent.root, replacement_atom))
		if new_child_slot is None and child.installed:
			replacement_atom = self._replace_installed_atom(child)
			if replacement_atom is not None:
				abi_reinstalls.add((child.root, replacement_atom))
		if abi_reinstalls:
			config.setdefault("slot_operator_replace_installed",
				set()).update(abi_reinstalls)

		self._dynamic_config._need_restart = True
1158
1160 new_dep = self._slot_operator_update_probe(dep, slot_conflict=True) 1161 1162 if new_dep is not None: 1163 return new_dep 1164 1165 if self._dynamic_config._autounmask is True: 1166 1167 for autounmask_level in self._autounmask_levels(): 1168 1169 new_dep = self._slot_operator_update_probe(dep, 1170 slot_conflict=True, autounmask_level=autounmask_level) 1171 1172 if new_dep is not None: 1173 return new_dep 1174 1175 return None
1176
	def _slot_operator_update_probe(self, dep, new_child_slot=False,
		slot_conflict=False, autounmask_level=None):
		"""
		slot/sub-slot := operators tend to prevent updates from getting pulled in,
		since installed packages pull in packages with the slot/sub-slot that they
		were built against. Detect this case so that we can schedule rebuilds
		and reinstalls when appropriate.
		NOTE: This function only searches for updates that involve upgrades
			to higher versions, since the logic required to detect when a
			downgrade would be desirable is not implemented.

		@param dep: the built slot-operator dependency to probe
		@param new_child_slot: if True, search for a child in a different
			(higher-version) slot instead of the same slot
		@param slot_conflict: if True, skip the "insignificant change"
			shortcut (used when probing from a slot-conflict context)
		@param autounmask_level: passed through to candidate iteration
		@return: a replacement Dependency (parent=replacement_parent,
			child=pkg, atom=unevaluated_atom), or None
		"""

		# Excluded packages are never rebuilt or replaced.
		if dep.child.installed and \
			self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
			modified_use=self._pkg_use_enabled(dep.child)):
			return None

		if dep.parent.installed and \
			self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
			modified_use=self._pkg_use_enabled(dep.parent)):
			return None

		debug = "--debug" in self._frozen_config.myopts
		selective = "selective" in self._dynamic_config.myparams
		want_downgrade = None

		for replacement_parent in self._iter_similar_available(dep.parent,
			dep.parent.slot_atom, autounmask_level=autounmask_level):

			# Lazily computed per replacement_parent, only when needed.
			selected_atoms = None

			for atom in replacement_parent.validated_atoms:
				if not atom.slot_operator == "=" or \
					atom.blocker or \
					atom.cp != dep.atom.cp:
					continue

				# Discard USE deps, we're only searching for an approximate
				# pattern, and dealing with USE states is too complex for
				# this purpose.
				unevaluated_atom = atom.unevaluated_atom
				atom = atom.without_use

				if replacement_parent.built and \
					portage.dep._match_slot(atom, dep.child):
					# Our selected replacement_parent appears to be built
					# for the existing child selection. So, discard this
					# parent and search for another.
					break

				for pkg in self._iter_similar_available(
					dep.child, atom):
					if pkg.slot == dep.child.slot and \
						pkg.sub_slot == dep.child.sub_slot:
						# If slot/sub-slot is identical, then there's
						# no point in updating.
						continue
					if new_child_slot:
						if pkg.slot == dep.child.slot:
							continue
						if pkg < dep.child:
							# the new slot only matters if the
							# package version is higher
							continue
					else:
						if pkg.slot != dep.child.slot:
							continue
						if pkg < dep.child:
							if want_downgrade is None:
								want_downgrade = self._downgrade_probe(dep.child)
							# be careful not to trigger a rebuild when
							# the only version available with a
							# different slot_operator is an older version
							if not want_downgrade:
								continue

					insignificant = False
					if not slot_conflict and \
						selective and \
						dep.parent.installed and \
						dep.child.installed and \
						dep.parent.cpv == replacement_parent.cpv and \
						dep.child.cpv == pkg.cpv:
						# This can happen if the child's sub-slot changed
						# without a revision bump. The sub-slot change is
						# considered insignificant until one of its parent
						# packages needs to be rebuilt (which may trigger a
						# slot conflict).
						insignificant = True

					if not insignificant:
						# Evaluate USE conditionals and || deps, in order
						# to see if this atom is really desirable, since
						# otherwise we may trigger an undesirable rebuild
						# as in bug #460304.
						if selected_atoms is None:
							selected_atoms = self._select_atoms_probe(
								dep.child.root, replacement_parent)
						if unevaluated_atom not in selected_atoms:
							continue

					if debug:
						msg = []
						msg.append("")
						msg.append("")
						msg.append("slot_operator_update_probe:")
						msg.append(" existing child package: %s" % dep.child)
						msg.append(" existing parent package: %s" % dep.parent)
						msg.append(" new child package: %s" % pkg)
						msg.append(" new parent package: %s" % replacement_parent)
						if insignificant:
							msg.append("insignificant changes detected")
						msg.append("")
						writemsg_level("\n".join(msg),
							noiselevel=-1, level=logging.DEBUG)

					if insignificant:
						return None

					return Dependency(parent=replacement_parent,
						child=pkg, atom=unevaluated_atom)

		if debug:
			msg = []
			msg.append("")
			msg.append("")
			msg.append("slot_operator_update_probe:")
			msg.append(" existing child package: %s" % dep.child)
			msg.append(" existing parent package: %s" % dep.parent)
			msg.append(" new child package: %s" % None)
			msg.append(" new parent package: %s" % None)
			msg.append("")
			writemsg_level("\n".join(msg),
				noiselevel=-1, level=logging.DEBUG)

		return None
1313
	def _slot_operator_unsatisfied_probe(self, dep):
		"""
		Check whether an unsatisfied built slot-operator dep could be
		satisfied by replacing dep.parent with a similar available
		package whose corresponding "=" dep is satisfiable.

		@rtype: bool
		@return: True if a replacement parent with a satisfiable dep
			exists, False otherwise.
		"""

		# Excluded parents are never replaced.
		if dep.parent.installed and \
			self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
			modified_use=self._pkg_use_enabled(dep.parent)):
			return False

		debug = "--debug" in self._frozen_config.myopts

		for replacement_parent in self._iter_similar_available(dep.parent,
			dep.parent.slot_atom):

			for atom in replacement_parent.validated_atoms:
				if not atom.slot_operator == "=" or \
					atom.blocker or \
					atom.cp != dep.atom.cp:
					continue

				# Discard USE deps, we're only searching for an approximate
				# pattern, and dealing with USE states is too complex for
				# this purpose.
				atom = atom.without_use

				pkg, existing_node = self._select_package(dep.root, atom,
					onlydeps=dep.onlydeps)

				if pkg is not None:

					if debug:
						msg = []
						msg.append("")
						msg.append("")
						msg.append("slot_operator_unsatisfied_probe:")
						msg.append(" existing parent package: %s" % dep.parent)
						msg.append(" existing parent atom: %s" % dep.atom)
						msg.append(" new parent package: %s" % replacement_parent)
						msg.append(" new child package: %s" % pkg)
						msg.append("")
						writemsg_level("\n".join(msg),
							noiselevel=-1, level=logging.DEBUG)

					return True

		if debug:
			msg = []
			msg.append("")
			msg.append("")
			msg.append("slot_operator_unsatisfied_probe:")
			msg.append(" existing parent package: %s" % dep.parent)
			msg.append(" existing parent atom: %s" % dep.atom)
			msg.append(" new parent package: %s" % None)
			msg.append(" new child package: %s" % None)
			msg.append("")
			writemsg_level("\n".join(msg),
				noiselevel=-1, level=logging.DEBUG)

		return False
1371
1373 1374 parent = dep.parent 1375 1376 if "--debug" in self._frozen_config.myopts: 1377 msg = [] 1378 msg.append("") 1379 msg.append("") 1380 msg.append("backtracking due to unsatisfied " 1381 "built slot-operator dep:") 1382 msg.append(" parent package: %s" % parent) 1383 msg.append(" atom: %s" % dep.atom) 1384 msg.append("") 1385 writemsg_level("\n".join(msg), 1386 noiselevel=-1, level=logging.DEBUG) 1387 1388 backtrack_infos = self._dynamic_config._backtrack_infos 1389 config = backtrack_infos.setdefault("config", {}) 1390 1391 # mask unwanted binary packages if necessary 1392 masks = {} 1393 if not parent.installed: 1394 masks.setdefault(parent, {})["slot_operator_mask_built"] = None 1395 if masks: 1396 config.setdefault("slot_operator_mask_built", {}).update(masks) 1397 1398 # trigger replacement of installed packages if necessary 1399 reinstalls = set() 1400 if parent.installed: 1401 replacement_atom = self._replace_installed_atom(parent) 1402 if replacement_atom is not None: 1403 reinstalls.add((parent.root, replacement_atom)) 1404 if reinstalls: 1405 config.setdefault("slot_operator_replace_installed", 1406 set()).update(reinstalls) 1407 1408 self._dynamic_config._need_restart = True
1409
1410 - def _downgrade_probe(self, pkg):
1411 """ 1412 Detect cases where a downgrade of the given package is considered 1413 desirable due to the current version being masked or unavailable. 1414 """ 1415 available_pkg = None 1416 for available_pkg in self._iter_similar_available(pkg, 1417 pkg.slot_atom): 1418 if available_pkg >= pkg: 1419 # There's an available package of the same or higher 1420 # version, so downgrade seems undesirable. 1421 return False 1422 1423 return available_pkg is not None
1424
1425 - def _select_atoms_probe(self, root, pkg):
1426 selected_atoms = [] 1427 use = self._pkg_use_enabled(pkg) 1428 for k in pkg._dep_keys: 1429 v = pkg._metadata.get(k) 1430 if not v: 1431 continue 1432 selected_atoms.extend(self._select_atoms( 1433 root, v, myuse=use, parent=pkg)[pkg]) 1434 return frozenset(x.unevaluated_atom for 1435 x in selected_atoms)
1436
	def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
		"""
		Given a package that's in the graph, do a rough check to
		see if a similar package is available to install. The given
		graph_pkg itself may be yielded only if it's not installed.

		@param graph_pkg: the package already in the graph
		@param atom: the atom used to search for candidates
		@param autounmask_level: passed through to visibility checks
		@return: iterator of candidate (not installed) packages
		"""

		usepkgonly = "--usepkgonly" in self._frozen_config.myopts
		useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
		use_ebuild_visibility = self._frozen_config.myopts.get(
			'--use-ebuild-visibility', 'n') != 'n'

		for pkg in self._iter_match_pkgs_any(
			graph_pkg.root_config, atom):
			if pkg.cp != graph_pkg.cp:
				# discard old-style virtual match
				continue
			if pkg.installed:
				continue
			if pkg in self._dynamic_config._runtime_pkg_mask:
				continue
			if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
				modified_use=self._pkg_use_enabled(pkg)):
				continue
			if pkg.built:
				if self._equiv_binary_installed(pkg):
					continue
				# Accept a built package either when binary-oriented
				# operation exempts it from the ebuild-visibility check
				# (--use-ebuild-visibility disabled, and --usepkgonly or
				# a useoldpkg_atoms match), or when an equivalent
				# visible ebuild exists.
				if not (not use_ebuild_visibility and
					(usepkgonly or useoldpkg_atoms.findAtomForPackage(
					pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
					not self._equiv_ebuild_visible(pkg,
					autounmask_level=autounmask_level):
					continue
			if not self._pkg_visibility_check(pkg,
				autounmask_level=autounmask_level):
				continue
			yield pkg
1474
1475 - def _replace_installed_atom(self, inst_pkg):
1476 """ 1477 Given an installed package, generate an atom suitable for 1478 slot_operator_replace_installed backtracking info. The replacement 1479 SLOT may differ from the installed SLOT, so first search by cpv. 1480 """ 1481 built_pkgs = [] 1482 for pkg in self._iter_similar_available(inst_pkg, 1483 Atom("=%s" % inst_pkg.cpv)): 1484 if not pkg.built: 1485 return pkg.slot_atom 1486 elif not pkg.installed: 1487 # avoid using SLOT from a built instance 1488 built_pkgs.append(pkg) 1489 1490 for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom): 1491 if not pkg.built: 1492 return pkg.slot_atom 1493 elif not pkg.installed: 1494 # avoid using SLOT from a built instance 1495 built_pkgs.append(pkg) 1496 1497 if built_pkgs: 1498 best_version = None 1499 for pkg in built_pkgs: 1500 if best_version is None or pkg > best_version: 1501 best_version = pkg 1502 return best_version.slot_atom 1503 1504 return None
1505
1507 """ 1508 Search for packages with slot-operator deps on older slots, and schedule 1509 rebuilds if they can link to a newer slot that's in the graph. 1510 """ 1511 1512 rebuild_if_new_slot = self._dynamic_config.myparams.get( 1513 "rebuild_if_new_slot", "y") == "y" 1514 1515 for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items(): 1516 1517 for dep in slot_info: 1518 1519 atom = dep.atom 1520 if atom.slot_operator is None: 1521 continue 1522 1523 if not atom.slot_operator_built: 1524 new_child_slot = self._slot_change_probe(dep) 1525 if new_child_slot is not None: 1526 self._slot_change_backtrack(dep, new_child_slot) 1527 continue 1528 1529 if not (dep.parent and 1530 isinstance(dep.parent, Package) and dep.parent.built): 1531 continue 1532 1533 # Check for slot update first, since we don't want to 1534 # trigger reinstall of the child package when a newer 1535 # slot will be used instead. 1536 if rebuild_if_new_slot: 1537 new_dep = self._slot_operator_update_probe(dep, 1538 new_child_slot=True) 1539 if new_dep is not None: 1540 self._slot_operator_update_backtrack(dep, 1541 new_child_slot=new_dep.child) 1542 break 1543 1544 if dep.want_update: 1545 if self._slot_operator_update_probe(dep): 1546 self._slot_operator_update_backtrack(dep) 1547 break
1548
	def _reinstall_for_flags(self, pkg, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags.

		@param forced_flags: flags forced by the profile, excluded from
			the IUSE-difference comparison
		@param orig_use, orig_iuse: USE/IUSE of the existing instance
		@param cur_use, cur_iuse: USE/IUSE of the candidate instance
		"""

		# binpkg_respect_use: Behave like newuse by default. If newuse is
		# False and changed_use is True, then behave like changed_use.
		binpkg_respect_use = (pkg.built and
			self._dynamic_config.myparams.get("binpkg_respect_use")
			in ("y", "auto"))
		newuse = "--newuse" in self._frozen_config.myopts
		changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
		# Feature-driven flags never trigger reinstalls on their own.
		feature_flags = _get_feature_flags(
			_get_eapi_attrs(pkg.eapi))

		if newuse or (binpkg_respect_use and not changed_use):
			# newuse: flags whose IUSE membership changed (minus forced
			# flags), plus flags whose effective enabled state changed.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
			flags.difference_update(feature_flags)
			if flags:
				return flags

		elif changed_use or binpkg_respect_use:
			# changed-use: only flags whose effective enabled state
			# (IUSE intersected with USE) changed.
			flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
			flags.difference_update(feature_flags)
			if flags:
				return flags
		return None
1580
1581 - def _create_graph(self, allow_unsatisfied=False):
1582 dep_stack = self._dynamic_config._dep_stack 1583 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack 1584 while dep_stack or dep_disjunctive_stack: 1585 self._spinner_update() 1586 while dep_stack: 1587 dep = dep_stack.pop() 1588 if isinstance(dep, Package): 1589 if not self._add_pkg_deps(dep, 1590 allow_unsatisfied=allow_unsatisfied): 1591 return 0 1592 continue 1593 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied): 1594 return 0 1595 if dep_disjunctive_stack: 1596 if not self._pop_disjunction(allow_unsatisfied): 1597 return 0 1598 return 1
1599
	def _expand_set_args(self, input_args, add_to_digraph=False):
		"""
		Iterate over a list of DependencyArg instances and yield all
		instances given in the input together with additional SetArg
		instances that are generated from nested sets.
		@param input_args: An iterable of DependencyArg instances
		@type input_args: Iterable
		@param add_to_digraph: If True then add SetArg instances
			to the digraph, in order to record parent -> child
			relationships from nested sets
		@type add_to_digraph: Boolean
		@rtype: Iterable
		@return: All args given in the input together with additional
			SetArg instances that are generated from nested sets
		"""

		# Guards against cycles between nested sets.
		traversed_set_args = set()

		for arg in input_args:
			if not isinstance(arg, SetArg):
				yield arg
				continue

			root_config = arg.root_config
			depgraph_sets = self._dynamic_config.sets[root_config.root]
			# Depth-first traversal of this arg's nested sets.
			arg_stack = [arg]
			while arg_stack:
				arg = arg_stack.pop()
				if arg in traversed_set_args:
					continue
				traversed_set_args.add(arg)

				if add_to_digraph:
					self._dynamic_config.digraph.add(arg, None,
						priority=BlockerDepPriority.instance)

				yield arg

				# Traverse nested sets and add them to the stack
				# if they're not already in the graph. Also, graph
				# edges between parent and nested sets.
				for token in arg.pset.getNonAtoms():
					if not token.startswith(SETPREFIX):
						continue
					s = token[len(SETPREFIX):]
					nested_set = depgraph_sets.sets.get(s)
					if nested_set is None:
						nested_set = root_config.sets.get(s)
					if nested_set is not None:
						# Register the nested set so later lookups by
						# name resolve to the same pset.
						nested_arg = SetArg(arg=token, pset=nested_set,
							root_config=root_config)
						arg_stack.append(nested_arg)
						if add_to_digraph:
							self._dynamic_config.digraph.add(nested_arg, arg,
								priority=BlockerDepPriority.instance)
						depgraph_sets.sets[nested_arg.name] = nested_arg.pset
1656
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""
		Add a single Dependency to the graph: convert blockers into
		Blocker nodes, select a package satisfying the dep, and fall
		back to backtracking or unsatisfied-dep reporting when nothing
		matches.

		@param dep: the Dependency instance to resolve
		@param allow_unsatisfied: if True, queue unsatisfiable deps in
			_unsatisfied_deps instead of failing
		@return: 1 on success (or when the dep may be ignored),
			0 when resolution must stop
		"""
		debug = "--debug" in self._frozen_config.myopts
		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
		nodeps = "--nodeps" in self._frozen_config.myopts
		if dep.blocker:
			# Blockers are collected for later validation; they never
			# fail resolution at this point.
			if not buildpkgonly and \
				not nodeps and \
				not dep.collapsed_priority.ignored and \
				not dep.collapsed_priority.optional and \
				dep.parent not in self._dynamic_config._slot_collision_nodes:
				if dep.parent.onlydeps:
					# It's safe to ignore blockers if the
					# parent is an --onlydeps node.
					return 1
				# The blocker applies to the root where
				# the parent is or will be installed.
				blocker = Blocker(atom=dep.atom,
					eapi=dep.parent.eapi,
					priority=dep.priority, root=dep.parent.root)
				self._dynamic_config._blocker_parents.add(blocker, dep.parent)
			return 1

		if dep.child is None:
			dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
				onlydeps=dep.onlydeps)
		else:
			# The caller has selected a specific package
			# via self._minimize_packages().
			dep_pkg = dep.child
			existing_node = self._dynamic_config._slot_pkg_map[
				dep.root].get(dep_pkg.slot_atom)

		if not dep_pkg:
			if (dep.collapsed_priority.optional or
				dep.collapsed_priority.ignored):
				# This is an unnecessary build-time dep.
				return 1
			if allow_unsatisfied:
				self._dynamic_config._unsatisfied_deps.append(dep)
				return 1
			self._dynamic_config._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))

			# The parent node should not already be in
			# runtime_pkg_mask, since that would trigger an
			# infinite backtracking loop.
			if self._dynamic_config._allow_backtracking:
				if dep.parent in self._dynamic_config._runtime_pkg_mask:
					if debug:
						writemsg(
							"!!! backtracking loop detected: %s %s\n" % \
							(dep.parent,
							self._dynamic_config._runtime_pkg_mask[
							dep.parent]), noiselevel=-1)
				elif dep.atom.slot_operator_built and \
					self._slot_operator_unsatisfied_probe(dep):
					self._slot_operator_unsatisfied_backtrack(dep)
					return 1
				else:
					# Do not backtrack if only USE have to be changed in
					# order to satisfy the dependency. Note that when
					# want_restart_for_use_change sets the need_restart
					# flag, it causes _select_pkg_highest_available to
					# return None, and eventually we come through here
					# and skip the "missing dependency" backtracking path.
					dep_pkg, existing_node = \
						self._select_package(dep.root, dep.atom.without_use,
							onlydeps=dep.onlydeps)
					if dep_pkg is None:
						self._dynamic_config._backtrack_infos["missing dependency"] = dep
						self._dynamic_config._need_restart = True
						if debug:
							msg = []
							msg.append("")
							msg.append("")
							msg.append("backtracking due to unsatisfied dep:")
							msg.append(" parent: %s" % dep.parent)
							msg.append(" priority: %s" % dep.priority)
							msg.append(" root: %s" % dep.root)
							msg.append(" atom: %s" % dep.atom)
							msg.append("")
							writemsg_level("".join("%s\n" % l for l in msg),
								noiselevel=-1, level=logging.DEBUG)

			return 0

		self._rebuild.add(dep_pkg, dep)

		ignore = dep.collapsed_priority.ignored and \
			not self._dynamic_config._traverse_ignored_deps
		if not ignore and not self._add_pkg(dep_pkg, dep):
			return 0
		return 1
1750
1751 - def _check_slot_conflict(self, pkg, atom):
1752 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom) 1753 matches = None 1754 if existing_node: 1755 matches = pkg.cpv == existing_node.cpv 1756 if pkg != existing_node and \ 1757 atom is not None: 1758 # Use package set for matching since it will match via 1759 # PROVIDE when necessary, while match_from_list does not. 1760 matches = bool(InternalPackageSet(initial_atoms=(atom,), 1761 allow_repo=True).findAtomForPackage(existing_node, 1762 modified_use=self._pkg_use_enabled(existing_node))) 1763 1764 return (existing_node, matches)
1765
1766 - def _add_pkg(self, pkg, dep):
1767 """ 1768 Adds a package to the depgraph, queues dependencies, and handles 1769 slot conflicts. 1770 """ 1771 debug = "--debug" in self._frozen_config.myopts 1772 myparent = None 1773 priority = None 1774 depth = 0 1775 if dep is None: 1776 dep = Dependency() 1777 else: 1778 myparent = dep.parent 1779 priority = dep.priority 1780 depth = dep.depth 1781 if priority is None: 1782 priority = DepPriority() 1783 1784 if debug: 1785 writemsg_level( 1786 "\n%s%s %s\n" % ("Child:".ljust(15), pkg, 1787 pkg_use_display(pkg, self._frozen_config.myopts, 1788 modified_use=self._pkg_use_enabled(pkg))), 1789 level=logging.DEBUG, noiselevel=-1) 1790 if isinstance(myparent, 1791 (PackageArg, AtomArg)): 1792 # For PackageArg and AtomArg types, it's 1793 # redundant to display the atom attribute. 1794 writemsg_level( 1795 "%s%s\n" % ("Parent Dep:".ljust(15), myparent), 1796 level=logging.DEBUG, noiselevel=-1) 1797 else: 1798 # Display the specific atom from SetArg or 1799 # Package types. 1800 uneval = "" 1801 if dep.atom is not dep.atom.unevaluated_atom: 1802 uneval = " (%s)" % (dep.atom.unevaluated_atom,) 1803 writemsg_level( 1804 "%s%s%s required by %s\n" % 1805 ("Parent Dep:".ljust(15), dep.atom, uneval, myparent), 1806 level=logging.DEBUG, noiselevel=-1) 1807 1808 # Ensure that the dependencies of the same package 1809 # are never processed more than once. 1810 previously_added = pkg in self._dynamic_config.digraph 1811 1812 pkgsettings = self._frozen_config.pkgsettings[pkg.root] 1813 1814 arg_atoms = None 1815 if True: 1816 try: 1817 arg_atoms = list(self._iter_atoms_for_pkg(pkg)) 1818 except portage.exception.InvalidDependString as e: 1819 if not pkg.installed: 1820 # should have been masked before it was selected 1821 raise 1822 del e 1823 1824 # NOTE: REQUIRED_USE checks are delayed until after 1825 # package selection, since we want to prompt the user 1826 # for USE adjustment rather than have REQUIRED_USE 1827 # affect package selection and || dep choices. 
1828 if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \ 1829 eapi_has_required_use(pkg.eapi): 1830 required_use_is_sat = check_required_use( 1831 pkg._metadata["REQUIRED_USE"], 1832 self._pkg_use_enabled(pkg), 1833 pkg.iuse.is_valid_flag, 1834 eapi=pkg.eapi) 1835 if not required_use_is_sat: 1836 if dep.atom is not None and dep.parent is not None: 1837 self._add_parent_atom(pkg, (dep.parent, dep.atom)) 1838 1839 if arg_atoms: 1840 for parent_atom in arg_atoms: 1841 parent, atom = parent_atom 1842 self._add_parent_atom(pkg, parent_atom) 1843 1844 atom = dep.atom 1845 if atom is None: 1846 atom = Atom("=" + pkg.cpv) 1847 self._dynamic_config._unsatisfied_deps_for_display.append( 1848 ((pkg.root, atom), 1849 {"myparent" : dep.parent, "show_req_use" : pkg})) 1850 self._dynamic_config._skip_restart = True 1851 return 0 1852 1853 if not pkg.onlydeps: 1854 1855 existing_node, existing_node_matches = \ 1856 self._check_slot_conflict(pkg, dep.atom) 1857 slot_collision = False 1858 if existing_node: 1859 if existing_node_matches: 1860 # The existing node can be reused. 
1861 if pkg != existing_node: 1862 pkg = existing_node 1863 previously_added = True 1864 try: 1865 arg_atoms = list(self._iter_atoms_for_pkg(pkg)) 1866 except InvalidDependString as e: 1867 if not pkg.installed: 1868 # should have been masked before 1869 # it was selected 1870 raise 1871 1872 if debug: 1873 writemsg_level( 1874 "%s%s %s\n" % ("Re-used Child:".ljust(15), 1875 pkg, pkg_use_display(pkg, 1876 self._frozen_config.myopts, 1877 modified_use=self._pkg_use_enabled(pkg))), 1878 level=logging.DEBUG, noiselevel=-1) 1879 1880 else: 1881 self._add_slot_conflict(pkg) 1882 if debug: 1883 writemsg_level( 1884 "%s%s %s\n" % ("Slot Conflict:".ljust(15), 1885 existing_node, pkg_use_display(existing_node, 1886 self._frozen_config.myopts, 1887 modified_use=self._pkg_use_enabled(existing_node))), 1888 level=logging.DEBUG, noiselevel=-1) 1889 1890 slot_collision = True 1891 1892 if slot_collision: 1893 # Now add this node to the graph so that self.display() 1894 # can show use flags and --tree portage.output. This node is 1895 # only being partially added to the graph. It must not be 1896 # allowed to interfere with the other nodes that have been 1897 # added. Do not overwrite data for existing nodes in 1898 # self._dynamic_config.mydbapi since that data will be used for blocker 1899 # validation. 1900 # Even though the graph is now invalid, continue to process 1901 # dependencies so that things like --fetchonly can still 1902 # function despite collisions. 1903 pass 1904 elif not previously_added: 1905 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg 1906 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg) 1907 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache() 1908 self._dynamic_config._highest_pkg_cache.clear() 1909 self._check_masks(pkg) 1910 1911 if not pkg.installed: 1912 # Allow this package to satisfy old-style virtuals in case it 1913 # doesn't already. 
Any pre-existing providers will be preferred 1914 # over this one. 1915 try: 1916 pkgsettings.setinst(pkg.cpv, pkg._metadata) 1917 # For consistency, also update the global virtuals. 1918 settings = self._frozen_config.roots[pkg.root].settings 1919 settings.unlock() 1920 settings.setinst(pkg.cpv, pkg._metadata) 1921 settings.lock() 1922 except portage.exception.InvalidDependString: 1923 if not pkg.installed: 1924 # should have been masked before it was selected 1925 raise 1926 1927 if arg_atoms: 1928 self._dynamic_config._set_nodes.add(pkg) 1929 1930 # Do this even for onlydeps, so that the 1931 # parent/child relationship is always known in case 1932 # self._show_slot_collision_notice() needs to be called later. 1933 # If a direct circular dependency is not an unsatisfied 1934 # buildtime dependency then drop it here since otherwise 1935 # it can skew the merge order calculation in an unwanted 1936 # way. 1937 if pkg != dep.parent or \ 1938 (priority.buildtime and not priority.satisfied): 1939 self._dynamic_config.digraph.add(