Diffstat (limited to 'external/poky/bitbake/lib/bb/codeparser.py')
-rw-r--r-- external/poky/bitbake/lib/bb/codeparser.py | 39
1 file changed, 11 insertions(+), 28 deletions(-)
diff --git a/external/poky/bitbake/lib/bb/codeparser.py b/external/poky/bitbake/lib/bb/codeparser.py
index ddd1b97d..25a7ac69 100644
--- a/external/poky/bitbake/lib/bb/codeparser.py
+++ b/external/poky/bitbake/lib/bb/codeparser.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
"""
BitBake code parser
@@ -21,19 +25,17 @@ import ast
import sys
import codegen
import logging
-import pickle
import bb.pysh as pysh
-import os.path
import bb.utils, bb.data
import hashlib
from itertools import chain
-from bb.pysh import pyshyacc, pyshlex, sherrors
+from bb.pysh import pyshyacc, pyshlex
from bb.cache import MultiProcessCache
logger = logging.getLogger('BitBake.CodeParser')
def bbhash(s):
-    return hashlib.md5(s.encode("utf-8")).hexdigest()
+    return hashlib.sha256(s.encode("utf-8")).hexdigest()
def check_indent(codestr):
"""If the code is indented, add a top level piece of code to 'remove' the indentation"""
@@ -54,30 +56,10 @@ def check_indent(codestr):
     return codestr
-
-# Basically pickle, in python 2.7.3 at least, does badly with data duplication
-# upon pickling and unpickling. Combine this with duplicate objects and things
-# are a mess.
-#
-# When the sets are originally created, python calls intern() on the set keys
-# which significantly improves memory usage. Sadly the pickle/unpickle process
-# doesn't call intern() on the keys and results in the same strings being duplicated
-# in memory. This also means pickle will save the same string multiple times in
-# the cache file.
-#
-# By having shell and python cacheline objects with setstate/getstate, we force
-# the object creation through our own routine where we can call intern (via internSet).
-#
-# We also use hashable frozensets and ensure we use references to these so that
-# duplicates can be removed, both in memory and in the resulting pickled data.
-#
-# By playing these games, the size of the cache file shrinks dramatically
-# meaning faster load times and the reloaded cache files also consume much less
-# memory. Smaller cache files, faster load times and lower memory usage is good.
-#
# A custom getstate/setstate using tuples is actually worth 15% cachesize by
# avoiding duplication of the attribute names!
+
class SetCache(object):
     def __init__(self):
         self.setcache = {}
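A hedged sketch of the tuple-based __getstate__/__setstate__ trick the surviving comment describes; CacheLine here is a hypothetical stand-in, not one of the file's real cacheline classes. Pickling a tuple stores only the values, while pickling the default __dict__ would also store every attribute-name string per object.

    import pickle

    class CacheLine(object):  # hypothetical example class, not from codeparser.py
        def __init__(self, contains, execs):
            self.contains, self.execs = contains, execs

        def __getstate__(self):
            # Serialise as a bare tuple: no attribute names end up in the pickle stream.
            return (self.contains, self.execs)

        def __setstate__(self, state):
            self.contains, self.execs = state

    blob = pickle.dumps(CacheLine(frozenset(("a",)), frozenset()))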
@@ -140,7 +122,7 @@ class CodeParserCache(MultiProcessCache):
     # so that an existing cache gets invalidated. Additionally you'll need
     # to increment __cache_version__ in cache.py in order to ensure that old
     # recipe caches don't trigger "Taskhash mismatch" errors.
-    CACHE_VERSION = 10
+    CACHE_VERSION = 11

     def __init__(self):
         MultiProcessCache.__init__(self)
@@ -368,8 +350,9 @@ class ShellParser():
     def _parse_shell(self, value):
         try:
             tokens, _ = pyshyacc.parse(value, eof=True, debug=False)
-        except pyshlex.NeedMore:
-            raise sherrors.ShellSyntaxError("Unexpected EOF")
+        except Exception:
+            bb.error('Error parsing shell code; the last 5 lines were:\n%s' % '\n'.join(value.split('\n')[-5:]))
+            raise
         self.process_tokens(tokens)
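The new except block is a general log-and-reraise pattern: report the tail of the failing input, then let the original exception propagate. A standalone sketch under that assumption, with print() standing in for bb.error():

    def parse_or_report(parse, value):
        try:
            return parse(value)
        except Exception:
            # Show the last five lines of the input for context, then re-raise.
            print('Error parsing shell code; the last 5 lines were:\n%s'
                  % '\n'.join(value.split('\n')[-5:]))
            raise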