Package portage :: Package util
[hide private]

Source Code for Package portage.util

   1  # Copyright 2004-2013 Gentoo Foundation 
   2  # Distributed under the terms of the GNU General Public License v2 
   3   
   4  from __future__ import unicode_literals 
   5   
   6  __all__ = ['apply_permissions', 'apply_recursive_permissions', 
   7          'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream', 
   8          'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs', 
   9          'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict', 
  10          'grabdict_package', 'grabfile', 'grabfile_package', 'grablines', 
  11          'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals', 
  12          'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist', 
  13          'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand', 
  14          'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout'] 
  15   
  16  from copy import deepcopy 
  17  import errno 
  18  import io 
  19  try: 
  20          from itertools import filterfalse 
  21  except ImportError: 
  22          from itertools import ifilterfalse as filterfalse 
  23  import logging 
  24  import re 
  25  import shlex 
  26  import stat 
  27  import string 
  28  import sys 
  29  import traceback 
  30  import glob 
  31   
  32  import portage 
  33  portage.proxy.lazyimport.lazyimport(globals(), 
  34          'pickle', 
  35          'portage.dep:Atom', 
  36          'subprocess', 
  37  ) 
  38   
  39  from portage import os 
  40  from portage import _encodings 
  41  from portage import _os_merge 
  42  from portage import _unicode_encode 
  43  from portage import _unicode_decode 
  44  from portage.const import VCS_DIRS 
  45  from portage.exception import InvalidAtom, PortageException, FileNotFound, \ 
  46         OperationNotPermitted, ParseError, PermissionDenied, ReadOnlyFileSystem 
  47  from portage.localization import _ 
  48  from portage.proxy.objectproxy import ObjectProxy 
  49  from portage.cache.mappings import UserDict 
  50   
if sys.hexversion >= 0x3000000:
	# Python 3: the native str type is the unicode text type.
	_unicode = str
else:
	# Python 2: use the dedicated unicode type.
	_unicode = unicode
  55   
# Global verbosity threshold consulted by writemsg(): messages whose
# noiselevel exceeds this value are suppressed.
noiselimit = 0
  57   
def initialize_logger(level=logging.WARN):
	"""Sets up basic logging of portage activities
	Args:
		level: the numeric level to emit messages at (e.g. logging.INFO)
	Returns:
		None
	"""
	# Bug fix: the 'level' argument was previously ignored and
	# logging.WARN was always passed to basicConfig().
	logging.basicConfig(level=level, format='[%(levelname)-4s] %(message)s')
66
def writemsg(mystr, noiselevel=0, fd=None):
	"""Emit a warning/debug message, gated by the global noiselimit setting."""
	global noiselimit
	target = sys.stderr if fd is None else fd
	if noiselevel > noiselimit:
		return
	# Re-code the message up front to avoid a potential UnicodeEncodeError.
	if isinstance(target, io.StringIO):
		msg = _unicode_decode(mystr,
			encoding=_encodings['content'], errors='replace')
	else:
		msg = _unicode_encode(mystr,
			encoding=_encodings['stdio'], errors='backslashreplace')
		if sys.hexversion >= 0x3000000 and target in (sys.stdout, sys.stderr):
			target = target.buffer
	target.write(msg)
	target.flush()
84
def writemsg_stdout(mystr,noiselevel=0):
	"""Prints messages to stdout based on the noiselimit setting.

	Thin wrapper around writemsg() with fd fixed to sys.stdout.
	"""
	writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
88
def writemsg_level(msg, level=0, noiselevel=0):
	"""
	Show a message for the given level as defined by the logging module
	(default is 0). When level >= logging.WARNING then the message is
	sent to stderr, otherwise it is sent to stdout. The noiselevel is
	passed directly to writemsg().

	@type msg: str
	@param msg: a message string, including newline if appropriate
	@type level: int
	@param level: a numeric logging level (see the logging module)
	@type noiselevel: int
	@param noiselevel: passed directly to writemsg
	"""
	writemsg(msg, noiselevel=noiselevel,
		fd=(sys.stderr if level >= logging.WARNING else sys.stdout))
108
def normalize_path(mypath):
	"""
	Like os.path.normpath(), except that "//foo" collapses to "/foo"
	instead of being preserved; we dislike posixpath's special-casing
	of exactly two leading slashes.
	"""
	if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
		sep = os.path.sep.encode()
	else:
		sep = os.path.sep

	# Prefixing two extra separators exploits the fact that
	# posixpath.normpath collapses 3 or more leading slashes to just 1.
	if mypath.startswith(sep):
		return os.path.normpath(sep * 2 + mypath)
	return os.path.normpath(mypath)
125
def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False):
	"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
	begins with a #, it is ignored, as are empty lines

	@param myfilename: file to read (or directory, when recursive is true)
	@param compat_level: lines of the form '##COMPAT==>N<==payload' have
		their payload included when compat_level >= N
	@param recursive: recurse into myfilename as a directory
	@param remember_source_file: return (line, source_filename) tuples
	@return: list of normalized lines (or (line, source) tuples)
	"""

	mylines=grablines(myfilename, recursive, remember_source_file=True)
	newlines=[]

	for x, source_file in mylines:
		#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
		#into single spaces.
		myline = x.split()
		if x and x[0] != "#":
			# Strip an inline comment: keep tokens up to the first one
			# that begins with '#'.
			mylinetemp = []
			for item in myline:
				if item[:1] != "#":
					mylinetemp.append(item)
				else:
					break
			myline = mylinetemp

		myline = " ".join(myline)
		if not myline:
			continue
		if myline[0]=="#":
			# Check if we have a compat-level string. BC-integration data.
			# '##COMPAT==>N<==' 'some string attached to it'
			mylinetest = myline.split("<==",1)
			if len(mylinetest) == 2:
				myline_potential = mylinetest[1]
				mylinetest = mylinetest[0].split("##COMPAT==>")
				if len(mylinetest) == 2:
					if compat_level >= int(mylinetest[1]):
						# It's a compat line, and the key matches.
						newlines.append(myline_potential)
				continue
			else:
				continue
		if remember_source_file:
			newlines.append((myline, source_file))
		else:
			newlines.append(myline)
	return newlines
168
def map_dictlist_vals(func,myDict):
	"""Performs a function on each value of each key in a dictlist.
	Returns a new dictlist.

	@param func: callable applied to every element of every value list
	@param myDict: dict mapping key -> list of values
	@return: a new dict with the same keys and transformed value lists
	"""
	# The original implementation assigned an empty list per key and then
	# immediately overwrote it (a dead store); a dict comprehension
	# expresses the same transformation directly.
	return dict((key, [func(x) for x in values])
		for key, values in myDict.items())
177
def stack_dictlist(original_dicts, incremental=0, incrementals=(), ignore_none=0):
	"""
	Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->list.
	Returns a single dict. Higher index in lists is preferenced.

	Example usage:
	   >>> from portage.util import stack_dictlist
		>>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
		>>> {'a':'b','x':'y'}
		>>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
		>>> {'a':['b','c'] }
		>>> a = {'KEYWORDS':['x86','alpha']}
		>>> b = {'KEYWORDS':['-x86']}
		>>> print stack_dictlist( [a,b] )
		>>> { 'KEYWORDS':['x86','alpha','-x86']}
		>>> print stack_dictlist( [a,b], incremental=True)
		>>> { 'KEYWORDS':['alpha'] }
		>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
		>>> { 'KEYWORDS':['alpha'] }

	@param original_dicts a list of (dictionary objects or None)
	@type list
	@param incremental True or false depending on whether new keys should overwrite
	   keys which already exist.
	@type boolean
	@param incrementals A list of items that should be incremental (-foo removes foo from
		the returned dict).
	@type list
	@param ignore_none Appears to be ignored, but probably was used long long ago.
	@type boolean

	"""
	# Note: the 'incrementals' default was changed from a shared mutable
	# list ([]) to an immutable tuple; it is only ever used for membership
	# tests, so behavior is unchanged.
	final_dict = {}
	for mydict in original_dicts:
		if mydict is None:
			continue
		for y in mydict:
			if y not in final_dict:
				final_dict[y] = []

			for thing in mydict[y]:
				if thing:
					if incremental or y in incrementals:
						if thing == "-*":
							# '-*' wipes everything accumulated so far.
							final_dict[y] = []
							continue
						elif thing[:1] == '-':
							# '-foo' removes a previously stacked 'foo'.
							try:
								final_dict[y].remove(thing[1:])
							except ValueError:
								pass
							continue
					if thing not in final_dict[y]:
						final_dict[y].append(thing)
			# Drop keys whose value list ended up empty.
			if y in final_dict and not final_dict[y]:
				del final_dict[y]
	return final_dict
236
def stack_dicts(dicts, incremental=0, incrementals=(), ignore_none=0):
	"""Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->string.
	Returns a single dict.

	@param dicts: list of dicts (entries that are None or empty are skipped)
	@param incremental: when true, values for repeated keys are joined
		with a space instead of overwritten
	@param incrementals: keys that are always treated incrementally
		(default changed from a shared mutable [] to an immutable tuple;
		it is only used for membership tests, so behavior is unchanged)
	@param ignore_none: unused; retained for backward compatibility
	"""
	final_dict = {}
	for mydict in dicts:
		if not mydict:
			continue
		for k, v in mydict.items():
			if k in final_dict and (incremental or (k in incrementals)):
				final_dict[k] += " " + v
			else:
				final_dict[k] = v
	return final_dict
251
def append_repo(atom_list, repo_name, remember_source_file=False):
	"""
	Takes a list of valid atoms without repo spec and appends ::repo_name.
	If an atom already has a repo part, then it is preserved (see bug #461948).
	"""
	def qualify(atom):
		# Leave the atom untouched when it already carries a repo.
		return atom if atom.repo is not None else atom.with_repo(repo_name)

	if remember_source_file:
		return [(qualify(atom), source) for atom, source in atom_list]
	return [qualify(atom) for atom in atom_list]
263
def stack_lists(lists, incremental=1, remember_source_file=False,
	warn_for_unmatched_removal=False, strict_warn_for_unmatched_removal=False, ignore_repo=False):
	"""Stacks an array of list-types into one array. Optionally removing
	distinct values using '-value' notation. Higher index is preferenced.

	all elements must be hashable.

	@param lists: list of token lists; with remember_source_file each
		element is a (token, source_file) tuple
	@param incremental: honor '-token' and '-*' removal notation
	@param remember_source_file: input tokens carry their source file and
		the result is a list of (token, source_file) tuples
	@param warn_for_unmatched_removal: warn about removals that matched nothing
	@param strict_warn_for_unmatched_removal: warn about every unmatched
		removal, even when the same token matched elsewhere
	@param ignore_repo: let '-cat/pkg' also remove 'cat/pkg::repo'
	"""
	matched_removals = set()
	# Maps source_file -> set of removal tokens that matched nothing.
	unmatched_removals = {}
	# Accumulates surviving tokens, mapping token -> source_file (or False).
	new_list = {}
	for sub_list in lists:
		for token in sub_list:
			token_key = token
			if remember_source_file:
				token, source_file = token
			else:
				source_file = False

			if token is None:
				continue

			if incremental:
				if token == "-*":
					# '-*' discards everything stacked so far.
					new_list.clear()
				elif token[:1] == '-':
					matched = False
					if ignore_repo and not "::" in token:
						#Let -cat/pkg remove cat/pkg::repo.
						to_be_removed = []
						token_slice = token[1:]
						for atom in new_list:
							atom_without_repo = atom
							if atom.repo is not None:
								# Atom.without_repo instantiates a new Atom,
								# which is unnecessary here, so use string
								# replacement instead.
								atom_without_repo = \
									atom.replace("::" + atom.repo, "", 1)
							if atom_without_repo == token_slice:
								to_be_removed.append(atom)
						if to_be_removed:
							matched = True
							for atom in to_be_removed:
								new_list.pop(atom)
					else:
						try:
							new_list.pop(token[1:])
							matched = True
						except KeyError:
							pass

					if not matched:
						if source_file and \
							(strict_warn_for_unmatched_removal or \
							token_key not in matched_removals):
							unmatched_removals.setdefault(source_file, set()).add(token)
					else:
						matched_removals.add(token_key)
				else:
					new_list[token] = source_file
			else:
				new_list[token] = source_file

	if warn_for_unmatched_removal:
		for source_file, tokens in unmatched_removals.items():
			if len(tokens) > 3:
				# Show only three examples plus a count, to keep the
				# warning readable.
				selected = [tokens.pop(), tokens.pop(), tokens.pop()]
				writemsg(_("--- Unmatched removal atoms in %s: %s and %s more\n") % \
					(source_file, ", ".join(selected), len(tokens)),
					noiselevel=-1)
			else:
				writemsg(_("--- Unmatched removal atom(s) in %s: %s\n") % \
					(source_file, ", ".join(tokens)),
					noiselevel=-1)

	if remember_source_file:
		return list(new_list.items())
	else:
		return list(new_list)
341
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
	"""
	This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary

	@param myfilename: file to process
	@type myfilename: string (path)
	@param juststrings: only return strings
	@type juststrings: Boolean (integer)
	@param empty: Ignore certain lines
	@type empty: Boolean (integer)
	@param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
	@type recursive: Boolean (integer)
	@param incremental: Append to the return list, don't overwrite
	@type incremental: Boolean (integer)
	@rtype: Dictionary
	@return:
	1.  Returns the lines in a file in a dictionary, for example:
		'sys-apps/portage x86 amd64 ppc'
		would return
		{ "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
	"""
	newdict={}
	for x in grablines(myfilename, recursive):
		#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
		#into single spaces.
		if x[0] == "#":
			continue
		myline=x.split()
		# Truncate the token list at the first inline '#' comment.
		mylinetemp = []
		for item in myline:
			if item[:1] != "#":
				mylinetemp.append(item)
			else:
				break
		myline = mylinetemp
		# Skip lines without values, unless 'empty' permits key-only lines.
		if len(myline) < 2 and empty == 0:
			continue
		if len(myline) < 1 and empty == 1:
			continue
		if incremental:
			newdict.setdefault(myline[0], []).extend(myline[1:])
		else:
			newdict[myline[0]] = myline[1:]
	if juststrings:
		for k, v in newdict.items():
			newdict[k] = " ".join(v)
	return newdict
# Cache mapping an 'eapi' file path to its parsed EAPI string; a value
# of None records a known-missing or invalid file (see
# read_corresponding_eapi_file).
_eapi_cache = {}
def read_corresponding_eapi_file(filename, default="0"):
	"""
	Read the 'eapi' file from the directory 'filename' is in.
	Returns "0" if the file is not present or invalid.

	@param filename: a file inside the directory whose 'eapi' file is read
	@param default: value returned when the eapi file is missing/invalid
	@return: the EAPI string, or 'default'
	"""
	eapi_file = os.path.join(os.path.dirname(filename), "eapi")
	try:
		eapi = _eapi_cache[eapi_file]
	except KeyError:
		pass
	else:
		# Cached None means a previously observed missing/invalid file.
		if eapi is None:
			return default
		return eapi

	eapi = None
	try:
		f = io.open(_unicode_encode(eapi_file,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'], errors='replace')
		try:
			lines = f.readlines()
		finally:
			# Close the handle even if readlines() raises; the original
			# code leaked the descriptor in that case.
			f.close()
		if len(lines) == 1:
			eapi = lines[0].rstrip("\n")
		else:
			writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
				noiselevel=-1)
	except IOError:
		pass

	_eapi_cache[eapi_file] = eapi
	if eapi is None:
		return default
	return eapi
426
def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
	verify_eapi=False, eapi=None):
	""" Does the same thing as grabdict except it validates keys
	with isvalidatom()"""

	if recursive:
		file_list = _recursive_file_list(myfilename)
	else:
		file_list = [myfilename]

	atoms = {}
	for filename in file_list:
		d = grabdict(filename, juststrings=False,
			empty=True, recursive=False, incremental=True)
		if not d:
			continue
		if verify_eapi and eapi is None:
			# NOTE(review): this reads the eapi file next to the top-level
			# 'myfilename' even while iterating a recursive file_list;
			# 'filename' looks like the intended argument — confirm.
			eapi = read_corresponding_eapi_file(myfilename)

		for k, v in d.items():
			try:
				k = Atom(k, allow_wildcard=allow_wildcard,
					allow_repo=allow_repo, eapi=eapi)
			except InvalidAtom as e:
				writemsg(_("--- Invalid atom in %s: %s\n") % (filename, e),
					noiselevel=-1)
			else:
				atoms.setdefault(k, []).extend(v)

	if juststrings:
		for k, v in atoms.items():
			atoms[k] = " ".join(v)

	return atoms
461
def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
	remember_source_file=False, verify_eapi=False, eapi=None):
	"""Like grabfile(), but each line is validated as a package atom.

	Lines carrying a special '-' prefix (and '*' in 'packages' files) are
	validated with the prefix stripped and returned as plain strings;
	normal lines are returned as Atom instances.  Invalid atoms are
	reported via writemsg() and skipped.
	"""

	pkgs=grabfile(myfilename, compatlevel, recursive=recursive, remember_source_file=True)
	if not pkgs:
		return pkgs
	if verify_eapi and eapi is None:
		eapi = read_corresponding_eapi_file(myfilename)
	mybasename = os.path.basename(myfilename)
	atoms = []
	for pkg, source_file in pkgs:
		pkg_orig = pkg
		# for packages and package.mask files
		if pkg[:1] == "-":
			pkg = pkg[1:]
		if pkg[:1] == '*' and mybasename == 'packages':
			pkg = pkg[1:]
		try:
			pkg = Atom(pkg, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
		except InvalidAtom as e:
			writemsg(_("--- Invalid atom in %s: %s\n") % (source_file, e),
				noiselevel=-1)
		else:
			if pkg_orig == _unicode(pkg):
				# normal atom, so return as Atom instance
				if remember_source_file:
					atoms.append((pkg, source_file))
				else:
					atoms.append(pkg)
			else:
				# atom has special prefix, so return as string
				if remember_source_file:
					atoms.append((pkg_orig, source_file))
				else:
					atoms.append(pkg_orig)
	return atoms
498
499 -def _recursive_basename_filter(f):
500 return not f.startswith(".") and not f.endswith("~")
501
def _recursive_file_list(path):
	# path may be a regular file or a directory

	def onerror(e):
		# Only permission problems are escalated; other stat/listdir
		# failures cause the entry to be silently skipped.
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(path)

	# Iterative depth-first walk; each stack entry is (parent, fname).
	stack = [os.path.split(path)]

	while stack:
		parent, fname = stack.pop()
		fullpath = os.path.join(parent, fname)

		try:
			st = os.stat(fullpath)
		except OSError as e:
			onerror(e)
			continue

		if stat.S_ISDIR(st.st_mode):
			# Skip VCS metadata directories and hidden/backup names.
			if fname in VCS_DIRS or not _recursive_basename_filter(fname):
				continue
			try:
				children = os.listdir(fullpath)
			except OSError as e:
				onerror(e)
				continue

			# Sort in reverse, since we pop from the end of the stack.
			# Include regular files in the stack, so files are sorted
			# together with directories.
			children.sort(reverse=True)
			stack.extend((fullpath, x) for x in children)

		elif stat.S_ISREG(st.st_mode):
			if _recursive_basename_filter(fname):
				yield fullpath
def grablines(myfilename, recursive=0, remember_source_file=False):
	"""Read a file and return its raw lines (trailing newlines preserved).

	@param myfilename: file to read, or directory when recursive is true
	@param recursive: walk myfilename recursively and read every regular
		file found
	@param remember_source_file: each entry becomes a
		(line, source_filename) tuple
	@return: list of lines (or tuples); missing or stale files yield []
	"""
	mylines = []
	if recursive:
		for f in _recursive_file_list(myfilename):
			mylines.extend(grablines(f, recursive=False,
				remember_source_file=remember_source_file))

	else:
		try:
			myfile = io.open(_unicode_encode(myfilename,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			try:
				if remember_source_file:
					mylines = [(line, myfilename) for line in myfile.readlines()]
				else:
					mylines = myfile.readlines()
			finally:
				# Release the descriptor even if readlines() fails; the
				# original code leaked it in that case.
				myfile.close()
		except IOError as e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(myfilename)
			elif e.errno in (errno.ENOENT, errno.ESTALE):
				pass
			else:
				raise
	return mylines
565
def writedict(mydict, myfilename, writekey=True):
	"""Writes out a dict to a file; writekey=0 mode doesn't write out
	the key and assumes all values are strings, not lists."""
	if writekey:
		lines = ["%s %s\n" % (k, " ".join(v)) for k, v in mydict.items()]
	else:
		lines = [v + "\n" for v in mydict.values()]
	write_atomic(myfilename, "".join(lines))
577
def shlex_split(s):
	"""
	Equivalent to shlex.split(), except that on python2 a unicode input
	is temporarily encoded to bytes (python2's shlex.split() cannot
	handle unicode strings) and each resulting token is decoded back.
	"""
	needs_py2_roundtrip = sys.hexversion < 0x3000000 and not isinstance(s, bytes)
	if not needs_py2_roundtrip:
		return shlex.split(s)
	return [_unicode_decode(token) for token in shlex.split(_unicode_encode(s))]
591
class _getconfig_shlex(shlex.shlex):
	"""
	shlex subclass used by getconfig(): adds portage-specific error
	handling for files pulled in via 'source' statements.
	"""

	def __init__(self, portage_tolerant=False, **kwargs):
		# portage_tolerant: when True, a missing sourced file only
		# produces a warning instead of raising ParseError.
		shlex.shlex.__init__(self, **kwargs)
		self.__portage_tolerant = portage_tolerant

	def sourcehook(self, newfile):
		"""
		Open a file referenced by a 'source' statement, translating
		errors: permission problems raise PermissionDenied, missing
		files warn or raise ParseError depending on tolerance, and
		anything else is reported and re-raised.
		"""
		try:
			return shlex.shlex.sourcehook(self, newfile)
		except EnvironmentError as e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(newfile)
			if e.errno not in (errno.ENOENT, errno.ENOTDIR):
				writemsg("open('%s', 'r'): %s\n" % (newfile, e), noiselevel=-1)
				raise

			msg = self.error_leader()
			if e.errno == errno.ENOTDIR:
				msg += _("%s: Not a directory") % newfile
			else:
				msg += _("%s: No such file or directory") % newfile

			if self.__portage_tolerant:
				writemsg("%s\n" % msg, noiselevel=-1)
			else:
				raise ParseError(msg)
			# Substitute an empty stream so parsing can continue.
			return (newfile, io.StringIO())
# Matches variable names that are NOT valid shell identifiers: a
# leading digit, or any character outside [a-zA-Z0-9_].
_invalid_var_name_re = re.compile(r'^\d|\W')
def getconfig(mycfg, tolerant=False, allow_sourcing=False, expand=True,
	recursive=False):
	"""
	Parse a bash-like key=value configuration file and return a dict of
	its assignments, or None if the file does not exist.

	@param mycfg: path of the config file
	@param tolerant: warn about syntax errors instead of raising ParseError
	@param allow_sourcing: honor 'source' statements
	@param expand: perform variable expansion; may also be a dict of
		pre-existing variable definitions to use for substitutions
	@param recursive: treat mycfg as a directory and parse all files in it
	@rtype: dict or None
	"""

	if isinstance(expand, dict):
		# Some existing variable definitions have been
		# passed in, for use in substitutions.
		expand_map = expand
		expand = True
	else:
		expand_map = {}
	mykeys = {}

	if recursive:
		# Emulate source commands so that syntax error messages
		# can display real file names and line numbers.
		if not expand:
			expand_map = False
		fname = None
		for fname in _recursive_file_list(mycfg):
			mykeys.update(getconfig(fname, tolerant=tolerant,
				allow_sourcing=allow_sourcing, expand=expand_map,
				recursive=False) or {})
		if fname is None:
			return None
		return mykeys

	f = None
	try:
		# NOTE: shlex doesn't support unicode objects with Python 2
		# (produces spurious \0 characters).
		if sys.hexversion < 0x3000000:
			f = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), 'rb')
		else:
			f = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), mode='r',
				encoding=_encodings['content'], errors='replace')
		content = f.read()
	except IOError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mycfg)
		if e.errno != errno.ENOENT:
			writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
			if e.errno not in (errno.EISDIR,):
				raise
		return None
	finally:
		if f is not None:
			f.close()

	# Since this file has unicode_literals enabled, and Python 2's
	# shlex implementation does not support unicode, the following code
	# uses _native_string() to encode unicode literals when necessary.

	# Workaround for avoiding a silent error in shlex that is
	# triggered by a source statement at the end of the file
	# without a trailing newline after the source statement.
	if content and content[-1] != portage._native_string('\n'):
		content += portage._native_string('\n')

	# Warn about dos-style line endings since that prevents
	# people from being able to source them with bash.
	if portage._native_string('\r') in content:
		writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
			"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)

	lex = None
	try:
		# The default shlex.sourcehook() implementation
		# only joins relative paths when the infile
		# attribute is properly set.
		lex = _getconfig_shlex(instream=content, infile=mycfg, posix=True,
			portage_tolerant=tolerant)
		lex.wordchars = portage._native_string(string.digits +
			string.ascii_letters + "~!@#$%*_\:;?,./-+{}")
		lex.quotes = portage._native_string("\"'")
		if allow_sourcing:
			lex.source = portage._native_string("source")

		while True:
			key = _unicode_decode(lex.get_token())
			if key == "export":
				# Silently accept and skip the 'export' keyword.
				key = _unicode_decode(lex.get_token())
			if key is None:
				#normal end of file
				break

			equ = _unicode_decode(lex.get_token())
			if not equ:
				msg = lex.error_leader() + _("Unexpected EOF")
				if not tolerant:
					raise ParseError(msg)
				else:
					writemsg("%s\n" % msg, noiselevel=-1)
					return mykeys

			elif equ != "=":
				msg = lex.error_leader() + \
					_("Invalid token '%s' (not '=')") % (equ,)
				if not tolerant:
					raise ParseError(msg)
				else:
					writemsg("%s\n" % msg, noiselevel=-1)
					return mykeys

			val = _unicode_decode(lex.get_token())
			if val is None:
				msg = lex.error_leader() + \
					_("Unexpected end of config file: variable '%s'") % (key,)
				if not tolerant:
					raise ParseError(msg)
				else:
					writemsg("%s\n" % msg, noiselevel=-1)
					return mykeys

			if _invalid_var_name_re.search(key) is not None:
				msg = lex.error_leader() + \
					_("Invalid variable name '%s'") % (key,)
				if not tolerant:
					raise ParseError(msg)
				writemsg("%s\n" % msg, noiselevel=-1)
				continue

			if expand:
				mykeys[key] = varexpand(val, mydict=expand_map,
					error_leader=lex.error_leader)
				# Record the expanded value so later assignments in the
				# same file can reference it.
				expand_map[key] = mykeys[key]
			else:
				mykeys[key] = val
	except SystemExit as e:
		raise
	except Exception as e:
		if isinstance(e, ParseError) or lex is None:
			raise
		# Prefix unexpected errors with file/line context before re-raising.
		msg = "%s%s" % (lex.error_leader(), e)
		writemsg("%s\n" % msg, noiselevel=-1)
		raise

	return mykeys
# Characters permitted inside a variable name during expansion.
_varexpand_word_chars = frozenset(string.ascii_letters + string.digits + "_")
# Error text mirroring bash's unterminated-brace diagnostic.
_varexpand_unexpected_eof_msg = "unexpected EOF while looking for matching `}'"
def varexpand(mystring, mydict=None, error_leader=None):
	"""
	new variable expansion code. Preserves quotes, handles \n, etc.
	This code is used by the configfile code, as well as others (parser)
	This would be a good bunch of code to port to C.
	"""
	if mydict is None:
		mydict = {}

	numvars=0
	#in single, double quotes
	insing=0
	indoub=0
	pos = 0
	length = len(mystring)
	newstring = []
	while pos < length:
		current = mystring[pos]
		if current == "'":
			if (indoub):
				newstring.append("'")
			else:
				newstring.append("'") # Quote removal is handled by shlex.
				insing=not insing
			pos=pos+1
			continue
		elif current == '"':
			if (insing):
				newstring.append('"')
			else:
				newstring.append('"') # Quote removal is handled by shlex.
				indoub=not indoub
			pos=pos+1
			continue
		if (not insing):
			#expansion time
			if current == "\n":
				#convert newlines to spaces
				newstring.append(" ")
				pos += 1
			elif current == "\\":
				# For backslash expansion, this function used to behave like
				# echo -e, but that's not needed for our purposes. We want to
				# behave like bash does when expanding a variable assignment
				# in a sourced file, in which case it performs backslash
				# removal for \\ and \$ but nothing more. It also removes
				# escaped newline characters. Note that we don't handle
				# escaped quotes here, since getconfig() uses shlex
				# to handle that earlier.
				if (pos+1>=len(mystring)):
					newstring.append(current)
					break
				else:
					current = mystring[pos + 1]
					pos += 2
					if current == "$":
						newstring.append(current)
					elif current == "\\":
						newstring.append(current)
						# BUG: This spot appears buggy, but it's intended to
						# be bug-for-bug compatible with existing behavior.
						if pos < length and \
							mystring[pos] in ("'", '"', "$"):
							newstring.append(mystring[pos])
							pos += 1
					elif current == "\n":
						pass
					else:
						newstring.append(mystring[pos - 2:pos])
					continue
			elif current == "$":
				pos=pos+1
				# NOTE(review): a lone trailing "$" makes this index one
				# past the end of mystring and raise IndexError — confirm
				# callers never pass such input.
				if mystring[pos]=="{":
					pos=pos+1
					braced=True
				else:
					braced=False
				myvstart=pos
				# Scan the variable name.
				while mystring[pos] in _varexpand_word_chars:
					if (pos+1)>=len(mystring):
						if braced:
							msg = _varexpand_unexpected_eof_msg
							if error_leader is not None:
								msg = error_leader() + msg
							writemsg(msg + "\n", noiselevel=-1)
							return ""
						else:
							pos=pos+1
							break
					pos=pos+1
				myvarname=mystring[myvstart:pos]
				if braced:
					if mystring[pos]!="}":
						msg = _varexpand_unexpected_eof_msg
						if error_leader is not None:
							msg = error_leader() + msg
						writemsg(msg + "\n", noiselevel=-1)
						return ""
					else:
						pos=pos+1
				if len(myvarname)==0:
					msg = "$"
					if braced:
						msg += "{}"
					msg += ": bad substitution"
					if error_leader is not None:
						msg = error_leader() + msg
					writemsg(msg + "\n", noiselevel=-1)
					return ""
				numvars=numvars+1
				# Unknown variables expand to the empty string.
				if myvarname in mydict:
					newstring.append(mydict[myvarname])
			else:
				newstring.append(current)
				pos += 1
		else:
			# Inside single quotes: copy characters verbatim.
			newstring.append(current)
			pos += 1

	return "".join(newstring)
# pickle_write is broken and was removed, but the name is kept so that
# legacy "from portage.util import pickle_write" imports still succeed.
pickle_write = None
def pickle_read(filename, default=None, debug=0):
	"""Load and return a pickled object from filename.

	@param filename: path of the pickle file
	@param default: value returned when the file is unreadable or invalid
	@param debug: unused; retained for backward compatibility
	@return: the unpickled data, or 'default' on any failure

	SECURITY NOTE: pickle.load() can execute arbitrary code when fed
	untrusted data; callers must only read trusted, local files.
	"""
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n", 1)
		return default
	data = None
	try:
		myf = open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		try:
			data = pickle.load(myf)
		finally:
			# Always release the descriptor; the original code leaked
			# it whenever unpickling raised.
			myf.close()
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n", 1)
	except SystemExit:
		raise
	except Exception as e:
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n", 1)
		data = default
	return data
908
def dump_traceback(msg, noiselevel=1):
	"""Write 'msg' plus the active traceback (or current stack) via writemsg()."""
	exc_type, exc_value, exc_tb = sys.exc_info()
	if exc_tb:
		frames = traceback.extract_tb(exc_tb)
		error = str(exc_value)
	else:
		# No active exception: show the call stack, minus this frame.
		frames = traceback.extract_stack()[:-1]
		error = None
	writemsg("\n====================================\n", noiselevel=noiselevel)
	writemsg("%s\n\n" % msg, noiselevel=noiselevel)
	for line in traceback.format_list(frames):
		writemsg(line, noiselevel=noiselevel)
	if error:
		writemsg(error+"\n", noiselevel=noiselevel)
	writemsg("====================================\n\n", noiselevel=noiselevel)
924
class cmp_sort_key(object):
	"""
	Adapter that turns a classic cmp(a, b) function into a "key"
	callable suitable for list.sort()/sorted(), since python-3.0
	removed the "cmp" keyword argument.  Calling the adapter wraps an
	element in a lightweight proxy whose __lt__ consults the cmp
	function.

	Beginning with Python 2.7 and 3.2, equivalent functionality is
	provided by functools.cmp_to_key().
	"""
	__slots__ = ("_cmp_func",)

	def __init__(self, cmp_func):
		"""
		@type cmp_func: callable which takes 2 positional arguments
		@param cmp_func: A cmp function.
		"""
		self._cmp_func = cmp_func

	def __call__(self, lhs):
		# Produce one proxy per element; sort compares the proxies.
		return self._cmp_key(self._cmp_func, lhs)

	class _cmp_key(object):
		__slots__ = ("_cmp_func", "_obj")

		def __init__(self, cmp_func, obj):
			self._cmp_func = cmp_func
			self._obj = obj

		def __lt__(self, other):
			# Refuse comparison against anything but a sibling proxy.
			if other.__class__ is not self.__class__:
				raise TypeError("Expected type %s, got %s" % \
					(self.__class__, other.__class__))
			return self._cmp_func(self._obj, other._obj) < 0
961
def unique_array(s):
	"""lifted from python cookbook, credit: Tim Peters
	Return a list of the elements in s in arbitrary order, sans duplicates"""
	n = len(s)
	# Fast path: hashable elements let a set do the work in linear time.
	try:
		return list(set(s))
	except TypeError:
		pass

	# Unhashable but sortable: sort, then sweep out adjacent duplicates.
	try:
		t = sorted(s)
	except TypeError:
		pass
	else:
		assert n > 0
		last = t[0]
		lasti = i = 1
		while i < n:
			if t[i] != last:
				t[lasti] = last = t[i]
				lasti += 1
			i += 1
		return t[:lasti]

	# Last resort: quadratic membership scan, preserving first occurrence.
	u = []
	for x in s:
		if x not in u:
			u.append(x)
	return u
995
def unique_everseen(iterable, key=None):
	"""
	List unique elements, preserving order. Remember all elements ever seen.
	Taken from itertools documentation.
	"""
	# unique_everseen('AAAABBBCCDAABBB') --> A B C D
	# unique_everseen('ABBCcAD', str.lower) --> A B C D
	seen = set()
	if key is None:
		for element in iterable:
			if element not in seen:
				seen.add(element)
				yield element
	else:
		for element in iterable:
			marker = key(element)
			if marker not in seen:
				seen.add(marker)
				yield element
1015
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""Apply user, group, and mode bits to a file if the existing bits do not
	already match. The default behavior is to force an exact match of mode
	bits. When mask=0 is specified, mode bits on the target file are allowed
	to be a superset of the mode argument (via logical OR). When mask>0, the
	mode bits that the target file is allowed to have are restricted via
	logical XOR.
	Returns True if the permissions were modified and False otherwise.

	@param filename: path whose ownership/mode is adjusted
	@param uid: target uid, or -1 to leave unchanged
	@param gid: target gid, or -1 to leave unchanged
	@param mode: target permission bits, or -1 to leave unchanged
	@param mask: bits the file is allowed to keep/lose (see above)
	@param stat_cached: pre-fetched stat result, to avoid a redundant stat
	@param follow_links: operate on the symlink target rather than the link
	@raise OperationNotPermitted: on EPERM
	@raise PermissionDenied: on EACCES
	@raise FileNotFound: on ENOENT
	@raise ReadOnlyFileSystem: on EROFS (chown/chmod only)
	"""

	modified = False

	# Since Python 3.4, chown requires int type (no proxies).
	uid = int(uid)
	gid = int(gid)

	if stat_cached is None:
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	if (uid != -1 and uid != stat_cached.st_uid) or \
		(gid != -1 and gid != stat_cached.st_gid):
		try:
			if follow_links:
				os.chown(filename, uid, gid)
			else:
				portage.data.lchown(filename, uid, gid)
			modified = True
		except OSError as oe:
			func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	new_mode = -1
	st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
	if mask >= 0:
		if mode == -1:
			mode = 0 # Don't add any mode bits when mode is unspecified.
		else:
			mode = mode & 0o7777
		if (mode & st_mode != mode) or \
			((mask ^ st_mode) & st_mode != st_mode):
			new_mode = mode | st_mode
			new_mode = (mask ^ new_mode) & new_mode
	elif mode != -1:
		mode = mode & 0o7777 # protect from unwanted bits
		if mode != st_mode:
			new_mode = mode

	# The chown system call may clear S_ISUID and S_ISGID
	# bits, so those bits are restored if necessary.
	if modified and new_mode == -1 and \
		(st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
		if mode == -1:
			new_mode = st_mode
		else:
			mode = mode & 0o7777
			if mask >= 0:
				new_mode = mode | st_mode
				new_mode = (mask ^ new_mode) & new_mode
			else:
				new_mode = mode
			if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
				new_mode = -1

	if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
		# Mode doesn't matter for symlinks.
		new_mode = -1

	if new_mode != -1:
		try:
			os.chmod(filename, new_mode)
			modified = True
		except OSError as oe:
			func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			raise
	return modified
1122
def apply_stat_permissions(filename, newstat, **kwargs):
	"""A convenience wrapper around apply_secpass_permissions which takes
	the uid, gid, and mode values from a stat result object."""
	return apply_secpass_permissions(
		filename,
		uid=newstat.st_uid,
		gid=newstat.st_gid,
		mode=newstat.st_mode,
		**kwargs)
1128
def apply_recursive_permissions(top, uid=-1, gid=-1,
	dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
	"""A wrapper around apply_secpass_permissions that applies permissions
	recursively. If optional argument onerror is specified, it should be a
	function; it will be called with one argument, a PortageException instance.
	Returns True if all permissions are applied and False if some are left
	unapplied."""

	# Avoid issues with circular symbolic links, as in bug #339670.
	follow_links = False

	if onerror is None:
		# By default, report problems on stderr so they do not go
		# unnoticed. Callers can pass in a quiet handler instead.
		def onerror(e):
			if isinstance(e, OperationNotPermitted):
				writemsg(_("Operation Not Permitted: %s\n") % str(e),
					noiselevel=-1)
			elif isinstance(e, FileNotFound):
				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
			else:
				raise

	everything_applied = True
	for parent, subdirs, names in os.walk(top):
		try:
			if not apply_secpass_permissions(parent,
				uid=uid, gid=gid, mode=dirmode, mask=dirmask,
				follow_links=follow_links):
				everything_applied = False
		except PortageException as e:
			everything_applied = False
			onerror(e)

		for name in names:
			try:
				if not apply_secpass_permissions(os.path.join(parent, name),
					uid=uid, gid=gid, mode=filemode, mask=filemask,
					follow_links=follow_links):
					everything_applied = False
			except PortageException as e:
				# Ignore InvalidLocation exceptions such as FileNotFound
				# and DirectoryNotFound since sometimes things disappear,
				# like when adjusting permissions on DISTCC_DIR.
				if not isinstance(e, portage.exception.InvalidLocation):
					everything_applied = False
					onerror(e)
	return everything_applied
def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""A wrapper around apply_permissions that uses secpass and simple
	logic to apply as much of the permissions as possible without
	generating an obviously avoidable permission exception. Despite
	attempts to avoid an exception, it's possible that one will be raised
	anyway, so be prepared.
	Returns True if all permissions are applied and False if some are left
	unapplied."""

	if stat_cached is None:
		stat_func = os.stat if follow_links else os.lstat
		try:
			stat_cached = stat_func(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			if oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			if oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			raise

	all_applied = True

	if portage.data.secpass < 2:
		# Without sufficient privileges, drop the ownership requests
		# that would obviously fail, recording that some permissions
		# were left unapplied.
		if uid != -1 and uid != stat_cached.st_uid:
			all_applied = False
			uid = -1

		if gid != -1 and \
			gid != stat_cached.st_gid and \
			gid not in os.getgroups():
			all_applied = False
			gid = -1

	apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
		stat_cached=stat_cached, follow_links=follow_links)
	return all_applied
1225
class atomic_ofstream(ObjectProxy):
	"""Write a file atomically via os.rename(). Atomic replacement prevents
	interprocess interference and prevents corruption of the target
	file when the write is interrupted (for example, when an 'out of space'
	error occurs)."""

	def __init__(self, filename, mode='w', follow_links=True, **kargs):
		"""Opens a temporary filename.pid in the same directory as filename."""
		ObjectProxy.__init__(self)
		# _aborted is set by abort(); close() checks it to decide between
		# rename-into-place and plain cleanup.
		object.__setattr__(self, '_aborted', False)
		if 'b' in mode:
			open_func = open
		else:
			# Text mode: use io.open with explicit encoding/error handling.
			open_func = io.open
			kargs.setdefault('encoding', _encodings['content'])
			kargs.setdefault('errors', 'backslashreplace')

		if follow_links:
			# Write next to the symlink's target so the rename at close()
			# replaces the real file, not the symlink.
			canonical_path = os.path.realpath(filename)
			object.__setattr__(self, '_real_name', canonical_path)
			tmp_name = "%s.%i" % (canonical_path, os.getpid())
			try:
				object.__setattr__(self, '_file',
					open_func(_unicode_encode(tmp_name,
						encoding=_encodings['fs'], errors='strict'),
						mode=mode, **portage._native_kwargs(kargs)))
				return
			except IOError as e:
				if canonical_path == filename:
					raise
				# Ignore this error, since it's irrelevant
				# and the below open call will produce a
				# new error if necessary.

		# Fall back to opening beside the given filename itself.
		# NOTE(review): this open passes **kargs directly while the one
		# above wraps kargs in portage._native_kwargs() — confirm whether
		# the asymmetry is intentional.
		object.__setattr__(self, '_real_name', filename)
		tmp_name = "%s.%i" % (filename, os.getpid())
		object.__setattr__(self, '_file',
			open_func(_unicode_encode(tmp_name,
				encoding=_encodings['fs'], errors='strict'),
				mode=mode, **kargs))

	def _get_target(self):
		# Hook used by ObjectProxy to delegate attribute access to the
		# underlying temp-file object.
		return object.__getattribute__(self, '_file')

	if sys.hexversion >= 0x3000000:

		def __getattribute__(self, attr):
			# Keep our own close/abort/__del__; forward everything else
			# to the wrapped file object.
			if attr in ('close', 'abort', '__del__'):
				return object.__getattribute__(self, attr)
			return getattr(object.__getattribute__(self, '_file'), attr)

	else:

		# For TextIOWrapper, automatically coerce write calls to
		# unicode, in order to avoid TypeError when writing raw
		# bytes with python2.

		def __getattribute__(self, attr):
			if attr in ('close', 'abort', 'write', '__del__'):
				return object.__getattribute__(self, attr)
			return getattr(object.__getattribute__(self, '_file'), attr)

		def write(self, s):
			# python2-only: decode bytes before handing them to a
			# text-mode wrapper.
			f = object.__getattribute__(self, '_file')
			if isinstance(f, io.TextIOWrapper):
				s = _unicode_decode(s)
			return f.write(s)

	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename(). If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					try:
						# Preserve the existing target's ownership/mode
						# on the replacement file when possible.
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				# (After a successful rename the unlink fails with
				# ENOENT, which is swallowed here.)
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass

	def abort(self):
		"""If an error occurs while writing the file, the user should
		call this method in order to leave the target file unchanged.
		This will call close() automatically."""
		if not object.__getattribute__(self, '_aborted'):
			object.__setattr__(self, '_aborted', True)
			self.close()

	def __del__(self):
		"""If the user does not explicitly call close(), it is
		assumed that an error has occurred, so we abort()."""
		try:
			f = object.__getattribute__(self, '_file')
		except AttributeError:
			pass
		else:
			if not f.closed:
				self.abort()
		# ensure destructor from the base class is called
		base_destructor = getattr(ObjectProxy, '__del__', None)
		if base_destructor is not None:
			base_destructor(self)
1346
def write_atomic(file_path, content, **kwargs):
	"""Write content to file_path atomically via atomic_ofstream,
	translating common IOError/OSError failures into the corresponding
	portage exception types. Extra keyword arguments are forwarded to
	atomic_ofstream."""
	f = None
	try:
		f = atomic_ofstream(file_path, **kwargs)
		f.write(content)
		f.close()
	except (IOError, OSError) as e:
		if f:
			f.abort()
		func_call = "write_atomic('%s')" % file_path
		if e.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		if e.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		if e.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		if e.errno == errno.ENOENT:
			raise FileNotFound(file_path)
		raise
1367
def ensure_dirs(dir_path, **kwargs):
	"""Create a directory (including parents) and call apply_permissions
	with any extra keyword arguments.
	Returns True if a directory is created or the permissions needed to be
	modified, and False otherwise.

	EEXIST errors are tolerated, which makes this function useful for
	atomic directory creation when multiple processes may be competing to
	create the same directory.
	"""

	created_dir = False
	try:
		os.makedirs(dir_path)
		created_dir = True
	except OSError as oe:
		if oe.errno == errno.EEXIST:
			pass
		elif os.path.isdir(dir_path):
			# NOTE: DragonFly raises EPERM for makedir('/')
			# and that is supposed to be ignored here.
			# Also, sometimes mkdir raises EISDIR on FreeBSD
			# and we want to ignore that too (bug #187518).
			pass
		else:
			func_call = "makedirs('%s')" % dir_path
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			raise

	perms_modified = apply_permissions(dir_path, **kwargs) if kwargs else False
	return created_dir or perms_modified
1407
class LazyItemsDict(UserDict):
	"""A mapping object that behaves like a standard dict except that it allows
	for lazy initialization of values via callable objects. Lazy items can be
	overwritten and deleted just as normal items."""

	# lazy_items maps keys to _LazyItem records; a None placeholder is
	# stored in the underlying dict so that lazy keys show up in keys(),
	# iteration, len(), etc.
	__slots__ = ('lazy_items',)

	def __init__(self, *args, **kwargs):
		"""Initialize like a normal dict; positional/keyword arguments are
		forwarded to UserDict (and merge in lazy items from another
		LazyItemsDict via update())."""
		self.lazy_items = {}
		UserDict.__init__(self, *args, **kwargs)

	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
		"""Add a lazy item for the given key. When the item is requested,
		value_callable will be called with *pargs and **kwargs arguments."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, False)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
		"""This is like addLazyItem except value_callable will only be called
		a maximum of 1 time and the result will be cached for future requests."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, True)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def update(self, *args, **kwargs):
		"""dict.update() equivalent that also transfers lazy items when the
		source mapping is itself a LazyItemsDict (without evaluating them)."""
		if len(args) > 1:
			raise TypeError(
				"expected at most 1 positional argument, got " + \
				repr(len(args)))
		if args:
			map_obj = args[0]
		else:
			map_obj = None
		if map_obj is None:
			pass
		elif isinstance(map_obj, LazyItemsDict):
			for k in map_obj:
				if k in map_obj.lazy_items:
					# Copy only the placeholder; the _LazyItem records are
					# merged in bulk below.
					UserDict.__setitem__(self, k, None)
				else:
					UserDict.__setitem__(self, k, map_obj[k])
			self.lazy_items.update(map_obj.lazy_items)
		else:
			UserDict.update(self, map_obj)
		if kwargs:
			UserDict.update(self, kwargs)

	def __getitem__(self, item_key):
		"""Return the value for item_key, evaluating a lazy item on demand.
		Singleton lazy items are evaluated once and then cached via normal
		item assignment; non-singletons are re-evaluated on every access."""
		if item_key in self.lazy_items:
			lazy_item = self.lazy_items[item_key]
			pargs = lazy_item.pargs
			if pargs is None:
				pargs = ()
			kwargs = lazy_item.kwargs
			if kwargs is None:
				kwargs = {}
			result = lazy_item.func(*pargs, **kwargs)
			if lazy_item.singleton:
				# Assignment through __setitem__ drops the lazy record, so
				# subsequent lookups use the cached value directly.
				self[item_key] = result
			return result

		else:
			return UserDict.__getitem__(self, item_key)

	def __setitem__(self, item_key, value):
		# A direct assignment overwrites (and discards) any lazy item.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__setitem__(self, item_key, value)

	def __delitem__(self, item_key):
		# Deleting a key also discards its pending lazy item, if any.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__delitem__(self, item_key)

	def clear(self):
		self.lazy_items.clear()
		UserDict.clear(self)

	def copy(self):
		return self.__copy__()

	def __copy__(self):
		# Shallow copy: the constructor's update() path transfers lazy
		# items without evaluating them.
		return self.__class__(self)

	def __deepcopy__(self, memo=None):
		"""
		This forces evaluation of each contained lazy item, and deepcopy of
		the result. A TypeError is raised if any contained lazy item is not
		a singleton, since it is not necessarily possible for the behavior
		of this type of item to be safely preserved.
		"""
		if memo is None:
			memo = {}
		result = self.__class__()
		memo[id(self)] = result
		for k in self:
			k_copy = deepcopy(k, memo)
			lazy_item = self.lazy_items.get(k)
			if lazy_item is not None:
				if not lazy_item.singleton:
					raise TypeError("LazyItemsDict " + \
						"deepcopy is unsafe with lazy items that are " + \
						"not singletons: key=%s value=%s" % (k, lazy_item,))
			# self[k] evaluates (and caches) singleton lazy items.
			UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
		return result

	class _LazyItem(object):
		"""Record describing one lazy value: the callable, its arguments,
		and whether the result should be cached (singleton)."""

		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')

		def __init__(self, func, pargs, kwargs, singleton):
			# Store None instead of empty containers to save memory;
			# __getitem__ substitutes ()/{}  when calling func.
			if not pargs:
				pargs = None
			if not kwargs:
				kwargs = None

			self.func = func
			self.pargs = pargs
			self.kwargs = kwargs
			self.singleton = singleton

		def __copy__(self):
			return self.__class__(self.func, self.pargs,
				self.kwargs, self.singleton)

		def __deepcopy__(self, memo=None):
			"""
			Override this since the default implementation can fail silently,
			leaving some attributes unset.
			"""
			if memo is None:
				memo = {}
			result = self.__copy__()
			memo[id(self)] = result
			result.func = deepcopy(self.func, memo)
			result.pargs = deepcopy(self.pargs, memo)
			result.kwargs = deepcopy(self.kwargs, memo)
			result.singleton = deepcopy(self.singleton, memo)
			return result
1552
class ConfigProtect(object):
	"""Tracks CONFIG_PROTECT and CONFIG_PROTECT_MASK path lists rooted at
	myroot and answers isprotected() queries for individual paths."""

	def __init__(self, myroot, protect_list, mask_list):
		# myroot: root prefix joined onto each protect/mask entry.
		# protect_list / mask_list: path entries relative to myroot.
		self.myroot = myroot
		self.protect_list = protect_list
		self.mask_list = mask_list
		self.updateprotect()

	def updateprotect(self):
		"""Update internal state for isprotected() calls. Nonexistent paths
		are ignored."""

		os = _os_merge

		# self.protect: normalized, existing protect entries.
		# self._dirs: the subset of entries known to be directories, used
		# by isprotected() for prefix vs. exact-match decisions.
		self.protect = []
		self._dirs = set()
		for x in self.protect_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protect.append(ppath)
			except OSError:
				# If it doesn't exist, there's no need to protect it.
				pass

		self.protectmask = []
		for x in self.mask_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				"""Use lstat so that anything, even a broken symlink can be
				protected."""
				if stat.S_ISDIR(os.lstat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protectmask.append(ppath)
				"""Now use stat in case this is a symlink to a directory."""
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
			except OSError:
				# If it doesn't exist, there's no need to mask it.
				pass

	def isprotected(self, obj):
		"""Returns True if obj is protected, False otherwise. The caller must
		ensure that obj is normalized with a single leading slash. A trailing
		slash is optional for directories."""
		# masked/protected record the lengths of matching mask/protect
		# entries; obj is protected when the protect match is strictly
		# longer than any mask match.
		masked = 0
		protected = 0
		sep = os.path.sep
		# NOTE(review): masked is still 0 throughout this first loop, so
		# "len(ppath) > masked" always holds here — confirm whether a
		# longest-match comparison against `protected` was intended.
		for ppath in self.protect:
			if len(ppath) > masked and obj.startswith(ppath):
				if ppath in self._dirs:
					if obj != ppath and not obj.startswith(ppath + sep):
						# /etc/foo does not match /etc/foobaz
						continue
				elif obj != ppath:
					# force exact match when CONFIG_PROTECT lists a
					# non-directory
					continue
				protected = len(ppath)
		#config file management
		for pmpath in self.protectmask:
			# Only mask entries at least as long as the protect match can
			# override it.
			if len(pmpath) >= protected and obj.startswith(pmpath):
				if pmpath in self._dirs:
					if obj != pmpath and \
						not obj.startswith(pmpath + sep):
						# /etc/foo does not match /etc/foobaz
						continue
				elif obj != pmpath:
					# force exact match when CONFIG_PROTECT_MASK lists
					# a non-directory
					continue
				#skip, it's in the mask
				masked = len(pmpath)
		return protected > masked
1629
def new_protect_filename(mydest, newmd5=None, force=False):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches. If force is True,
	then a new filename will be generated even if mydest does not
	exist yet.
	(dest,md5) ==> 'string' --- path_to_target_filename
	(dest) ==> ('next', 'highest') --- next_target and most-recent_target
	"""

	# config protection filename format:
	# ._cfg0000_foo
	# 0123456789012

	os = _os_merge

	prot_num = -1
	last_pfile = ""

	if not force and not os.path.exists(mydest):
		return mydest

	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)
	# Scan the directory for existing ._cfgNNNN_<name> entries and track
	# the highest counter seen along with its filename.
	for pfile in os.listdir(real_dirname):
		if not pfile.startswith("._cfg"):
			continue
		if pfile[10:] != real_filename:
			continue
		try:
			new_prot_num = int(pfile[5:9])
		except ValueError:
			continue
		if new_prot_num > prot_num:
			prot_num = new_prot_num
			last_pfile = pfile
	prot_num += 1

	new_pfile = normalize_path(os.path.join(real_dirname,
		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
	if last_pfile and newmd5:
		try:
			last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
		except FileNotFound:
			# The file suddenly disappeared or it's a broken symlink.
			pass
		else:
			if last_pfile_md5 == newmd5:
				# Identical content is already staged; reuse it rather
				# than creating yet another ._cfg file.
				return old_pfile
	return new_pfile
1681
def find_updated_config_files(target_root, config_protect):
	"""
	Generator that yields, for each config_protect entry under target_root
	which has pending configuration updates, a tuple organized like:
	  (protected_dir, file_list)
	or, when the protected entry is a file rather than a directory:
	  (protected_file, None)
	Nothing is yielded when no configuration files need to be updated.
	"""

	encoding = _encodings['fs']

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			# Build a find(1) command that locates pending ._cfgNNNN_*
			# entries, pruning hidden (e.g. VCS) directories.
			# NOTE(review): x is interpolated into single quotes without
			# escaping, so a path containing a "'" would break the
			# resulting command — confirm whether such paths can occur.
			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
					os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			cmd = shlex_split(mycommand)

			if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
				# Python 3.1 _execvp throws TypeError for non-absolute executable
				# path passed as bytes (see http://bugs.python.org/issue8513).
				fullname = portage.process.find_binary(cmd[0])
				if fullname is None:
					raise portage.exception.CommandNotFound(cmd[0])
				cmd[0] = fullname

			cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
				for arg in cmd]
			proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
				stderr=subprocess.STDOUT)
			output = _unicode_decode(proc.communicate()[0], encoding=encoding)
			status = proc.wait()
			if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
				files = output.split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
# Matches "include <pattern>" directives in ld.so.conf files, capturing
# the (possibly glob) pattern naming additional config files.
_ld_so_include_re = re.compile(r'^include\s+(\S.*)')
def getlibpaths(root, env=None):
	"""Return a list of paths that are used for library lookups.

	The result is assembled per ld.so(8): LD_LIBRARY_PATH entries from
	env, the (recursively expanded) etc/ld.so.conf under root, and the
	default /usr/lib and /lib directories. Empty entries are dropped and
	every path is normalized.

	(Fix: the original placed this description as a bare string statement
	after the nested function, where it was not a docstring at all.)

	@param root: root directory containing etc/ld.so.conf
	@param env: environment mapping to read LD_LIBRARY_PATH from;
		defaults to os.environ
	"""
	def read_ld_so_conf(path):
		# Yield each library directory listed in the given config file,
		# recursively expanding "include" directives (which may contain
		# glob patterns relative to the including file's directory).
		for l in grabfile(path):
			include_match = _ld_so_include_re.match(l)
			if include_match is not None:
				subpath = os.path.join(os.path.dirname(path),
					include_match.group(1))
				for p in glob.glob(subpath):
					for r in read_ld_so_conf(p):
						yield r
			else:
				yield l

	if env is None:
		env = os.environ
	# the following is based on the information from ld.so(8)
	rval = env.get("LD_LIBRARY_PATH", "").split(":")
	rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
	rval.append("/usr/lib")
	rval.append("/lib")

	return [normalize_path(x) for x in rval if x]