Diffstat (limited to 'meson/mesonbuild/modules/unstable_cuda.py')
-rw-r--r-- | meson/mesonbuild/modules/unstable_cuda.py | 350
1 file changed, 350 insertions, 0 deletions
diff --git a/meson/mesonbuild/modules/unstable_cuda.py b/meson/mesonbuild/modules/unstable_cuda.py
new file mode 100644
index 000000000..d542fdd54
--- /dev/null
+++ b/meson/mesonbuild/modules/unstable_cuda.py
@@ -0,0 +1,350 @@
+# Copyright 2017 The Meson development team
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing as T
+import re
+
+from ..mesonlib import version_compare
+from ..compilers import CudaCompiler, Compiler
+
+from . import NewExtensionModule
+
+from ..interpreterbase import (
+    flatten, permittedKwargs, noKwargs,
+    InvalidArguments, FeatureNew
+)
+
+class CudaModule(NewExtensionModule):
+
+    @FeatureNew('CUDA module', '0.50.0')
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+        self.methods.update({
+            "min_driver_version": self.min_driver_version,
+            "nvcc_arch_flags": self.nvcc_arch_flags,
+            "nvcc_arch_readable": self.nvcc_arch_readable,
+        })
+
+    @noKwargs
+    def min_driver_version(self, state: 'ModuleState',
+                           args: T.Tuple[str],
+                           kwargs: T.Dict[str, T.Any]) -> str:
+        argerror = InvalidArguments('min_driver_version must have exactly one positional argument: ' +
+                                    'a CUDA Toolkit version string. Beware that, since CUDA 11.0, ' +
+                                    'the CUDA Toolkit\'s components (including NVCC) are versioned ' +
+                                    'independently from each other (and the CUDA Toolkit as a whole).')
+
+        if len(args) != 1 or not isinstance(args[0], str):
+            raise argerror
+
+        cuda_version = args[0]
+        driver_version_table = [
+            {'cuda_version': '>=11.5.0', 'windows': '496.04', 'linux': '495.29.05'},
+            {'cuda_version': '>=11.4.1', 'windows': '471.41', 'linux': '470.57.02'},
+            {'cuda_version': '>=11.4.0', 'windows': '471.11', 'linux': '470.42.01'},
+            {'cuda_version': '>=11.3.0', 'windows': '465.89', 'linux': '465.19.01'},
+            {'cuda_version': '>=11.2.2', 'windows': '461.33', 'linux': '460.32.03'},
+            {'cuda_version': '>=11.2.1', 'windows': '461.09', 'linux': '460.32.03'},
+            {'cuda_version': '>=11.2.0', 'windows': '460.82', 'linux': '460.27.03'},
+            {'cuda_version': '>=11.1.1', 'windows': '456.81', 'linux': '455.32'},
+            {'cuda_version': '>=11.1.0', 'windows': '456.38', 'linux': '455.23'},
+            {'cuda_version': '>=11.0.3', 'windows': '451.82', 'linux': '450.51.06'},
+            {'cuda_version': '>=11.0.2', 'windows': '451.48', 'linux': '450.51.05'},
+            {'cuda_version': '>=11.0.1', 'windows': '451.22', 'linux': '450.36.06'},
+            {'cuda_version': '>=10.2.89', 'windows': '441.22', 'linux': '440.33'},
+            {'cuda_version': '>=10.1.105', 'windows': '418.96', 'linux': '418.39'},
+            {'cuda_version': '>=10.0.130', 'windows': '411.31', 'linux': '410.48'},
+            {'cuda_version': '>=9.2.148', 'windows': '398.26', 'linux': '396.37'},
+            {'cuda_version': '>=9.2.88', 'windows': '397.44', 'linux': '396.26'},
+            {'cuda_version': '>=9.1.85', 'windows': '391.29', 'linux': '390.46'},
+            {'cuda_version': '>=9.0.76', 'windows': '385.54', 'linux': '384.81'},
+            {'cuda_version': '>=8.0.61', 'windows': '376.51', 'linux': '375.26'},
+            {'cuda_version': '>=8.0.44', 'windows': '369.30', 'linux': '367.48'},
+            {'cuda_version': '>=7.5.16', 'windows': '353.66', 'linux': '352.31'},
+            {'cuda_version': '>=7.0.28', 'windows': '347.62', 'linux': '346.46'},
+        ]
+
+        driver_version = 'unknown'
+        for d in driver_version_table:
+            if version_compare(cuda_version, d['cuda_version']):
+                driver_version = d.get(state.host_machine.system, d['linux'])
+                break
+
+        return driver_version
+
+    @permittedKwargs(['detected'])
+    def nvcc_arch_flags(self, state: 'ModuleState',
+                        args: T.Tuple[T.Union[Compiler, CudaCompiler, str]],
+                        kwargs: T.Dict[str, T.Any]) -> T.List[str]:
+        nvcc_arch_args = self._validate_nvcc_arch_args(args, kwargs)
+        ret = self._nvcc_arch_flags(*nvcc_arch_args)[0]
+        return ret
+
+    @permittedKwargs(['detected'])
+    def nvcc_arch_readable(self, state: 'ModuleState',
+                           args: T.Tuple[T.Union[Compiler, CudaCompiler, str]],
+                           kwargs: T.Dict[str, T.Any]) -> T.List[str]:
+        nvcc_arch_args = self._validate_nvcc_arch_args(args, kwargs)
+        ret = self._nvcc_arch_flags(*nvcc_arch_args)[1]
+        return ret
+
+    @staticmethod
+    def _break_arch_string(s):
+        s = re.sub('[ \t\r\n,;]+', ';', s)
+        s = s.strip(';').split(';')
+        return s
+
+    @staticmethod
+    def _detected_cc_from_compiler(c):
+        if isinstance(c, CudaCompiler):
+            return c.detected_cc
+        return ''
+
+    @staticmethod
+    def _version_from_compiler(c):
+        if isinstance(c, CudaCompiler):
+            return c.version
+        if isinstance(c, str):
+            return c
+        return 'unknown'
+
+    def _validate_nvcc_arch_args(self, args, kwargs):
+        argerror = InvalidArguments('The first argument must be an NVCC compiler object, or its version string!')
+
+        if len(args) < 1:
+            raise argerror
+        else:
+            compiler = args[0]
+            cuda_version = self._version_from_compiler(compiler)
+            if cuda_version == 'unknown':
+                raise argerror
+
+        arch_list = [] if len(args) <= 1 else flatten(args[1:])
+        arch_list = [self._break_arch_string(a) for a in arch_list]
+        arch_list = flatten(arch_list)
+        if len(arch_list) > 1 and not set(arch_list).isdisjoint({'All', 'Common', 'Auto'}):
+            raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')
+        arch_list = arch_list[0] if len(arch_list) == 1 else arch_list
+
+        detected = kwargs.get('detected', self._detected_cc_from_compiler(compiler))
+        detected = flatten([detected])
+        detected = [self._break_arch_string(a) for a in detected]
+        detected = flatten(detected)
+        if not set(detected).isdisjoint({'All', 'Common', 'Auto'}):
+            raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')
+
+        return cuda_version, arch_list, detected
+
+    def _filter_cuda_arch_list(self, cuda_arch_list, lo=None, hi=None, saturate=None):
+        """
+        Filter CUDA arch list (no codenames) for >= low and < hi architecture
+        bounds, and deduplicate.
+        If saturate is provided, architectures >= hi are replaced with saturate.
+        """
+
+        filtered_cuda_arch_list = []
+        for arch in cuda_arch_list:
+            if arch:
+                if lo and version_compare(arch, '<' + lo):
+                    continue
+                if hi and version_compare(arch, '>=' + hi):
+                    if not saturate:
+                        continue
+                    arch = saturate
+                if arch not in filtered_cuda_arch_list:
+                    filtered_cuda_arch_list.append(arch)
+        return filtered_cuda_arch_list
+
+    def _nvcc_arch_flags(self, cuda_version, cuda_arch_list='Auto', detected=''):
+        """
+        Using the CUDA Toolkit version and the target architectures, compute
+        the NVCC architecture flags.
+        """
+
+        # Replicates much of the logic of
+        #     https://github.com/Kitware/CMake/blob/master/Modules/FindCUDA/select_compute_arch.cmake
+        # except that a bug with cuda_arch_list="All" is worked around by
+        # tracking both lower and upper limits on GPU architectures.
+
+        cuda_known_gpu_architectures = ['Fermi', 'Kepler', 'Maxwell']  # noqa: E221
+        cuda_common_gpu_architectures = ['3.0', '3.5', '5.0']  # noqa: E221
+        cuda_hi_limit_gpu_architecture = None  # noqa: E221
+        cuda_lo_limit_gpu_architecture = '2.0'  # noqa: E221
+        cuda_all_gpu_architectures = ['3.0', '3.2', '3.5', '5.0']  # noqa: E221
+
+        if version_compare(cuda_version, '<7.0'):
+            cuda_hi_limit_gpu_architecture = '5.2'
+
+        if version_compare(cuda_version, '>=7.0'):
+            cuda_known_gpu_architectures += ['Kepler+Tegra', 'Kepler+Tesla', 'Maxwell+Tegra']  # noqa: E221
+            cuda_common_gpu_architectures += ['5.2']  # noqa: E221
+
+        if version_compare(cuda_version, '<8.0'):
+            cuda_common_gpu_architectures += ['5.2+PTX']  # noqa: E221
+            cuda_hi_limit_gpu_architecture = '6.0'  # noqa: E221
+
+        if version_compare(cuda_version, '>=8.0'):
+            cuda_known_gpu_architectures += ['Pascal', 'Pascal+Tegra']  # noqa: E221
+            cuda_common_gpu_architectures += ['6.0', '6.1']  # noqa: E221
+            cuda_all_gpu_architectures += ['6.0', '6.1', '6.2']  # noqa: E221
+
+        if version_compare(cuda_version, '<9.0'):
+            cuda_common_gpu_architectures += ['6.1+PTX']  # noqa: E221
+            cuda_hi_limit_gpu_architecture = '7.0'  # noqa: E221
+
+        if version_compare(cuda_version, '>=9.0'):
+            cuda_known_gpu_architectures += ['Volta', 'Xavier']  # noqa: E221
+            cuda_common_gpu_architectures += ['7.0']  # noqa: E221
+            cuda_all_gpu_architectures += ['7.0', '7.2']  # noqa: E221
+            # https://docs.nvidia.com/cuda/archive/9.0/cuda-toolkit-release-notes/index.html#unsupported-features
+            cuda_lo_limit_gpu_architecture = '3.0'  # noqa: E221
+
+        if version_compare(cuda_version, '<10.0'):
+            cuda_common_gpu_architectures += ['7.2+PTX']  # noqa: E221
+            cuda_hi_limit_gpu_architecture = '8.0'  # noqa: E221
+
+        if version_compare(cuda_version, '>=10.0'):
+            cuda_known_gpu_architectures += ['Turing']  # noqa: E221
+            cuda_common_gpu_architectures += ['7.5']  # noqa: E221
+            cuda_all_gpu_architectures += ['7.5']  # noqa: E221
+
+        if version_compare(cuda_version, '<11.0'):
+            cuda_common_gpu_architectures += ['7.5+PTX']  # noqa: E221
+            cuda_hi_limit_gpu_architecture = '8.0'  # noqa: E221
+
+        if version_compare(cuda_version, '>=11.0'):
+            cuda_known_gpu_architectures += ['Ampere']  # noqa: E221
+            cuda_common_gpu_architectures += ['8.0']  # noqa: E221
+            cuda_all_gpu_architectures += ['8.0']  # noqa: E221
+            # https://docs.nvidia.com/cuda/archive/11.0/cuda-toolkit-release-notes/index.html#deprecated-features
+            cuda_lo_limit_gpu_architecture = '3.5'  # noqa: E221
+
+        if version_compare(cuda_version, '<11.1'):
+            cuda_common_gpu_architectures += ['8.0+PTX']  # noqa: E221
+            cuda_hi_limit_gpu_architecture = '8.6'  # noqa: E221
+
+        if version_compare(cuda_version, '>=11.1'):
+            cuda_common_gpu_architectures += ['8.6', '8.6+PTX']  # noqa: E221
+            cuda_all_gpu_architectures += ['8.6']  # noqa: E221
+
+        if version_compare(cuda_version, '<12.0'):
+            cuda_hi_limit_gpu_architecture = '9.0'  # noqa: E221
+
+        if not cuda_arch_list:
+            cuda_arch_list = 'Auto'
+
+        if cuda_arch_list == 'All':       # noqa: E271
+            cuda_arch_list = cuda_known_gpu_architectures
+        elif cuda_arch_list == 'Common':  # noqa: E271
+            cuda_arch_list = cuda_common_gpu_architectures
+        elif cuda_arch_list == 'Auto':    # noqa: E271
+            if detected:
+                if isinstance(detected, list):
+                    cuda_arch_list = detected
+                else:
+                    cuda_arch_list = self._break_arch_string(detected)
+                cuda_arch_list = self._filter_cuda_arch_list(cuda_arch_list,
+                                                             cuda_lo_limit_gpu_architecture,
+                                                             cuda_hi_limit_gpu_architecture,
+                                                             cuda_common_gpu_architectures[-1])
+            else:
+                cuda_arch_list = cuda_common_gpu_architectures
+        elif isinstance(cuda_arch_list, str):
+            cuda_arch_list = self._break_arch_string(cuda_arch_list)
+
+        cuda_arch_list = sorted([x for x in set(cuda_arch_list) if x])
+
+        cuda_arch_bin = []
+        cuda_arch_ptx = []
+        for arch_name in cuda_arch_list:
+            arch_bin = []
+            arch_ptx = []
+            add_ptx = arch_name.endswith('+PTX')
+            if add_ptx:
+                arch_name = arch_name[:-len('+PTX')]
+
+            if re.fullmatch('[0-9]+\\.[0-9](\\([0-9]+\\.[0-9]\\))?', arch_name):
+                arch_bin, arch_ptx = [arch_name], [arch_name]
+            else:
+                arch_bin, arch_ptx = {
+                    'Fermi': (['2.0', '2.1(2.0)'], []),
+                    'Kepler+Tegra': (['3.2'], []),
+                    'Kepler+Tesla': (['3.7'], []),
+                    'Kepler': (['3.0', '3.5'], ['3.5']),
+                    'Maxwell+Tegra': (['5.3'], []),
+                    'Maxwell': (['5.0', '5.2'], ['5.2']),
+                    'Pascal': (['6.0', '6.1'], ['6.1']),
+                    'Pascal+Tegra': (['6.2'], []),
+                    'Volta': (['7.0'], ['7.0']),
+                    'Xavier': (['7.2'], []),
+                    'Turing': (['7.5'], ['7.5']),
+                    'Ampere': (['8.0'], ['8.0']),
+                }.get(arch_name, (None, None))
+
+            if arch_bin is None:
+                raise InvalidArguments('Unknown CUDA Architecture Name {}!'
+                                       .format(arch_name))
+
+            cuda_arch_bin += arch_bin
+
+            if add_ptx:
+                if not arch_ptx:
+                    arch_ptx = arch_bin
+                cuda_arch_ptx += arch_ptx
+
+        cuda_arch_bin = sorted(list(set(cuda_arch_bin)))
+        cuda_arch_ptx = sorted(list(set(cuda_arch_ptx)))
+
+        nvcc_flags = []
+        nvcc_archs_readable = []
+
+        for arch in cuda_arch_bin:
+            arch, codev = re.fullmatch(
+                '([0-9]+\\.[0-9])(?:\\(([0-9]+\\.[0-9])\\))?', arch).groups()
+
+            if version_compare(arch, '<' + cuda_lo_limit_gpu_architecture):
+                continue
+            if version_compare(arch, '>=' + cuda_hi_limit_gpu_architecture):
+                continue
+
+            if codev:
+                arch = arch.replace('.', '')
+                codev = codev.replace('.', '')
+                nvcc_flags += ['-gencode', 'arch=compute_' + codev + ',code=sm_' + arch]
+                nvcc_archs_readable += ['sm_' + arch]
+            else:
+                arch = arch.replace('.', '')
+                nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=sm_' + arch]
+                nvcc_archs_readable += ['sm_' + arch]
+
+        for arch in cuda_arch_ptx:
+            arch, codev = re.fullmatch(
+                '([0-9]+\\.[0-9])(?:\\(([0-9]+\\.[0-9])\\))?', arch).groups()
+
+            if codev:
+                arch = codev
+
+            if version_compare(arch, '<' + cuda_lo_limit_gpu_architecture):
+                continue
+            if version_compare(arch, '>=' + cuda_hi_limit_gpu_architecture):
+                continue
+
+            arch = arch.replace('.', '')
+            nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=compute_' + arch]
+            nvcc_archs_readable += ['compute_' + arch]
+
+        return nvcc_flags, nvcc_archs_readable
+
+def initialize(*args, **kwargs):
+    return CudaModule(*args, **kwargs)
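For context, a minimal usage sketch (not part of the diff above) of how a project's meson.build might drive this module. The method names, the 'detected' keyword and the 'unstable-cuda' import name come from the module itself; the project name, source file and the example architecture list are illustrative assumptions.

    # meson.build sketch (hypothetical project; names are illustrative)
    project('cuda-demo', 'cuda')

    cuda = import('unstable-cuda')
    nvcc = meson.get_compiler('cuda')

    # Minimum display driver required by this toolkit version.
    message('Minimum driver: ' + cuda.min_driver_version(nvcc.version()))

    # NVCC -gencode flags plus a readable summary; 'detected' overrides the
    # architectures auto-detected by the compiler on the build machine.
    arch_flags = cuda.nvcc_arch_flags(nvcc, 'Auto', detected: ['8.0'])
    arch_readable = cuda.nvcc_arch_readable(nvcc, 'Auto', detected: ['8.0'])
    message('Building for: ' + ', '.join(arch_readable))

    executable('demo', 'main.cu', cuda_args: arch_flags)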