-rw-r--r--  azure-pipelines.yml                          |   2
-rw-r--r--  docs/markdown/Builtin-options.md             |   2
-rw-r--r--  docs/markdown/Cuda-module.md                 | 183
-rw-r--r--  docs/markdown/snippets/fortran_submodule.md  |  12
-rw-r--r--  docs/sitemap.txt                             |   1
-rw-r--r--  mesonbuild/backend/backends.py               |  19
-rw-r--r--  mesonbuild/backend/ninjabackend.py           |  52
-rw-r--r--  mesonbuild/backend/vs2010backend.py          |  31
-rw-r--r--  mesonbuild/build.py                          |   7
-rw-r--r--  mesonbuild/compilers/c.py                    |  22
-rw-r--r--  mesonbuild/compilers/compilers.py            |  15
-rw-r--r--  mesonbuild/compilers/cs.py                   |   1
-rw-r--r--  mesonbuild/compilers/d.py                    |  15
-rw-r--r--  mesonbuild/compilers/fortran.py              |   2
-rw-r--r--  mesonbuild/compilers/java.py                 |   1
-rw-r--r--  mesonbuild/compilers/vala.py                 |   6
-rw-r--r--  mesonbuild/coredata.py                       |  88
-rw-r--r--  mesonbuild/dependencies/base.py              |   4
-rw-r--r--  mesonbuild/environment.py                    |  38
-rw-r--r--  mesonbuild/interpreter.py                    |  13
-rw-r--r--  mesonbuild/mconf.py                          |   6
-rw-r--r--  mesonbuild/mintro.py                         |   3
-rw-r--r--  mesonbuild/modules/__init__.py               |   4
-rw-r--r--  mesonbuild/modules/gnome.py                  | 180
-rw-r--r--  mesonbuild/modules/python.py                 |   7
-rw-r--r--  mesonbuild/modules/unstable_cuda.py          | 259
-rwxr-xr-x  run_tests.py                                 |   2
-rwxr-xr-x  run_unittests.py                             |  12
-rw-r--r--  test cases/cuda/3 cudamodule/meson.build     |  16
-rw-r--r--  test cases/cuda/3 cudamodule/prog.cu         |  30
-rw-r--r--  test cases/fortran/12 submodule/a1.f90       |  25
-rw-r--r--  test cases/fortran/12 submodule/a2.f90       |  10
-rw-r--r--  test cases/fortran/12 submodule/a3.f90       |  10
-rw-r--r--  test cases/fortran/12 submodule/child.f90    |  10
-rw-r--r--  test cases/fortran/12 submodule/meson.build  |   7
-rw-r--r--  test cases/fortran/12 submodule/parent.f90   |  23
36 files changed, 979 insertions, 139 deletions
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 39e41e9..90ebeff 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -136,6 +136,7 @@ jobs:
git ^
mercurial ^
mingw-w64-$(MSYS2_ARCH)-cmake ^
+ mingw-w64-$(MSYS2_ARCH)-ninja ^
mingw-w64-$(MSYS2_ARCH)-pkg-config ^
mingw-w64-$(MSYS2_ARCH)-python2 ^
mingw-w64-$(MSYS2_ARCH)-python3 ^
@@ -144,7 +145,6 @@ jobs:
displayName: Install Dependencies
- script: |
set PATH=%SystemRoot%\system32;%SystemRoot%;%SystemRoot%\System32\Wbem
- %MSYS2_ROOT%\usr\bin\bash -lc "wget https://github.com/mesonbuild/cidata/raw/master/ninja.exe; mv ninja.exe /$MSYSTEM/bin"
set PATHEXT=%PATHEXT%;.py
if %compiler%==clang ( set CC=clang && set CXX=clang++ )
%MSYS2_ROOT%\usr\bin\bash -lc "MSYSTEM= python3 run_tests.py --backend=ninja"
diff --git a/docs/markdown/Builtin-options.md b/docs/markdown/Builtin-options.md
index 288bd79..0d1a16b 100644
--- a/docs/markdown/Builtin-options.md
+++ b/docs/markdown/Builtin-options.md
@@ -42,7 +42,7 @@ Installation options are all relative to the prefix, except:
| werror | false | Treat warnings as errors |
| warning_level {1, 2, 3} | 1 | Set the warning level. From 1 = lowest to 3 = highest |
| layout {mirror,flat} | mirror | Build directory layout. |
-| default-library {shared, static, both} | shared | Default library type. |
+| default_library {shared, static, both} | shared | Default library type. |
| backend {ninja, vs,<br>vs2010, vs2015, vs2017, xcode} | | Backend to use (default: ninja). |
| stdsplit | | Split stdout and stderr in test logs. |
| errorlogs | | Whether to print the logs from failing tests. |
diff --git a/docs/markdown/Cuda-module.md b/docs/markdown/Cuda-module.md
new file mode 100644
index 0000000..caa1756
--- /dev/null
+++ b/docs/markdown/Cuda-module.md
@@ -0,0 +1,183 @@
+---
+short-description: CUDA module
+authors:
+ - name: Olexa Bilaniuk
+ years: [2019]
+ has-copyright: false
+...
+
+# Unstable CUDA Module
+_Since: 0.50.0_
+
+This module provides helper functionality related to the CUDA Toolkit and
+building code using it.
+
+
+**Note**: this module is unstable. It is only provided as a technology preview.
+Its API may change in arbitrary ways between releases or it might be removed
+from Meson altogether.
+
+
+## Importing the module
+
+The module may be imported as follows:
+
+``` meson
+cuda = import('unstable-cuda')
+```
+
+It offers several useful functions that are enumerated below.
+
+
+## Functions
+
+### `nvcc_arch_flags()`
+_Since: 0.50.0_
+
+``` meson
+cuda.nvcc_arch_flags(nvcc_or_version, ...,
+ detected: string_or_array)
+```
+
+Returns a list of `-gencode` flags that should be passed to `cuda_args:` in
+order to compile a "fat binary" for the architectures/compute capabilities
+enumerated in the positional argument(s). The flags are tailored to the NVCC
+compiler given as `nvcc_or_version`, which may be either a compiler object or
+its version string.
+
+A set of architectures and/or compute capabilities may be specified by:
+
+- The single positional argument `'All'`, `'Common'` or `'Auto'`
+- One or more (optionally given as an array) of:
+ - Architecture names (`'Kepler'`, `'Maxwell+Tegra'`, `'Turing'`) and/or
+ - Compute capabilities (`'3.0'`, `'3.5'`, `'5.3'`, `'7.5'`)
+
+A suffix of `+PTX` requests PTX code generation for the given architecture.
+A compute capability given as `A.B(X.Y)` requests PTX generation for an older
+virtual architecture `X.Y` before binary generation for a newer architecture
+`A.B`.
+
+Multiple architectures and compute capabilities may be passed in using:
+
+- Multiple positional arguments
+- Lists of strings
+- Space (` `), comma (`,`) or semicolon (`;`)-separated strings
+
+The single-word architectural sets `'All'`, `'Common'` or `'Auto'` cannot be
+mixed with architecture names or compute capabilities. Their interpretation is:
+
+| Name              | Meaning            |
+|-------------------|--------------------|
+| `'All'` | All CCs supported by given NVCC compiler. |
+| `'Common'` | Relatively common CCs supported by given NVCC compiler. Generally excludes Tegra and Tesla devices. |
+| `'Auto'` | The CCs provided by the `detected:` keyword, filtered for support by given NVCC compiler. |
+
+The supported architecture names and their corresponding compute capabilities
+are:
+
+| Name | Compute Capability |
+|-------------------|--------------------|
+| `'Fermi'` | 2.0, 2.1(2.0) |
+| `'Kepler'` | 3.0, 3.5 |
+| `'Kepler+Tegra'` | 3.2 |
+| `'Kepler+Tesla'` | 3.7 |
+| `'Maxwell'` | 5.0, 5.2 |
+| `'Maxwell+Tegra'` | 5.3 |
+| `'Pascal'` | 6.0, 6.1 |
+| `'Pascal+Tegra'` | 6.2 |
+| `'Volta'` | 7.0 |
+| `'Volta+Tegra'` | 7.2 |
+| `'Turing'` | 7.5 |
+
+
+Examples:
+
+ cuda.nvcc_arch_flags('10.0', '3.0', '3.5', '5.0+PTX')
+ cuda.nvcc_arch_flags('10.0', ['3.0', '3.5', '5.0+PTX'])
+ cuda.nvcc_arch_flags('10.0', [['3.0', '3.5'], '5.0+PTX'])
+ cuda.nvcc_arch_flags('10.0', '3.0 3.5 5.0+PTX')
+ cuda.nvcc_arch_flags('10.0', '3.0,3.5,5.0+PTX')
+ cuda.nvcc_arch_flags('10.0', '3.0;3.5;5.0+PTX')
+ cuda.nvcc_arch_flags('10.0', 'Kepler 5.0+PTX')
+ # Returns ['-gencode', 'arch=compute_30,code=sm_30',
+ # '-gencode', 'arch=compute_35,code=sm_35',
+ # '-gencode', 'arch=compute_50,code=sm_50',
+ # '-gencode', 'arch=compute_50,code=compute_50']
+
+ cuda.nvcc_arch_flags('10.0', '3.5(3.0)')
+ # Returns ['-gencode', 'arch=compute_30,code=sm_35']
+
+ cuda.nvcc_arch_flags('8.0', 'Common')
+ # Returns ['-gencode', 'arch=compute_30,code=sm_30',
+ # '-gencode', 'arch=compute_35,code=sm_35',
+ # '-gencode', 'arch=compute_50,code=sm_50',
+ # '-gencode', 'arch=compute_52,code=sm_52',
+ # '-gencode', 'arch=compute_60,code=sm_60',
+ # '-gencode', 'arch=compute_61,code=sm_61',
+ # '-gencode', 'arch=compute_61,code=compute_61']
+
+ cuda.nvcc_arch_flags('9.2', 'Auto', detected: '6.0 6.0 6.0 6.0')
+ cuda.nvcc_arch_flags('9.2', 'Auto', detected: ['6.0', '6.0', '6.0', '6.0'])
+ # Returns ['-gencode', 'arch=compute_60,code=sm_60']
+
+ cuda.nvcc_arch_flags(nvcc, 'All')
+ # Returns ['-gencode', 'arch=compute_20,code=sm_20',
+ # '-gencode', 'arch=compute_20,code=sm_21',
+ # '-gencode', 'arch=compute_30,code=sm_30',
+ # '-gencode', 'arch=compute_32,code=sm_32',
+ # '-gencode', 'arch=compute_35,code=sm_35',
+ # '-gencode', 'arch=compute_37,code=sm_37',
+ # '-gencode', 'arch=compute_50,code=sm_50', # nvcc.version() < 7.0
+ # '-gencode', 'arch=compute_52,code=sm_52',
+ # '-gencode', 'arch=compute_53,code=sm_53', # nvcc.version() >= 7.0
+ # '-gencode', 'arch=compute_60,code=sm_60',
+ # '-gencode', 'arch=compute_61,code=sm_61', # nvcc.version() >= 8.0
+ # '-gencode', 'arch=compute_70,code=sm_70',
+ # '-gencode', 'arch=compute_72,code=sm_72', # nvcc.version() >= 9.0
+ # '-gencode', 'arch=compute_75,code=sm_75'] # nvcc.version() >= 10.0
+
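+The returned flags are typically attached to a CUDA target through the
+`cuda_args:` keyword argument. A minimal sketch (target and file names below
+are illustrative only):
+
+    arch_flags = cuda.nvcc_arch_flags('10.0', 'Auto', detected: ['6.1'])
+    lib = static_library('kernels', 'kernels.cu', cuda_args: arch_flags)
+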
+_Note:_ This function is intended to closely replicate CMake's FindCUDA module
+function `CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable, [list of CUDA compute architectures])`.
+
+
+
+### `nvcc_arch_readable()`
+_Since: 0.50.0_
+
+``` meson
+cuda.nvcc_arch_readable(nvcc_or_version, ...,
+ detected: string_or_array)
+```
+
+Has precisely the same interface as [`nvcc_arch_flags()`](#nvcc_arch_flags),
+but rather than returning a list of flags, it returns a "readable" list of
+architectures that will be compiled for. The output of this function is solely
+intended for informative message printing.
+
+ archs = '3.0 3.5 5.0+PTX'
+ readable = cuda.nvcc_arch_readable(nvcc, archs)
+ message('Building for architectures ' + ' '.join(readable))
+
+This will print
+
+ Message: Building for architectures sm30 sm35 sm50 compute50
+
+_Note:_ This function is intended to closely replicate CMake's FindCUDA module function
+`CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable, [list of CUDA compute architectures])`.
+
+
+
+### `min_driver_version()`
+_Since: 0.50.0_
+
+``` meson
+cuda.min_driver_version(nvcc_or_version)
+```
+
+Returns the minimum NVIDIA proprietary driver version required, on the host
+system, by kernels compiled with the given NVCC compiler (passed either as a
+compiler object or as its version string).
+
+The output of this function is generally intended for informative message
+printing, but could be used for assertions or to conditionally enable
+features known to exist within the minimum NVIDIA driver required.
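+
+For example, the requirement can be reported at configure time. A minimal
+sketch, assuming the project has enabled the `cuda` language:
+
+    nvcc = meson.get_compiler('cuda')
+    driver_version = cuda.min_driver_version(nvcc)
+    message('Requires NVIDIA driver >= ' + driver_version)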
+
+
diff --git a/docs/markdown/snippets/fortran_submodule.md b/docs/markdown/snippets/fortran_submodule.md
new file mode 100644
index 0000000..9e4b9cc
--- /dev/null
+++ b/docs/markdown/snippets/fortran_submodule.md
@@ -0,0 +1,12 @@
+## Fortran submodule support
+
+Initial support for the Fortran `submodule` statement was added. A submodule
+may be placed in the same file as its parent `module` or in a separate file.
+The submodule hierarchy specified in the `submodule` statements of the
+Fortran sources is used by Meson to resolve source file dependencies.
+For example:
+
+```fortran
+submodule (ancestor:parent) child
+```
+
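+A hypothetical target containing both the parent module and its submodule can
+simply list the source files; Meson resolves the compilation order from the
+`module`/`submodule` statements (file names below are illustrative only):
+
+```meson
+executable('demo', 'parent.f90', 'child.f90')
+```
+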
diff --git a/docs/sitemap.txt b/docs/sitemap.txt
index f80c279..6987641 100644
--- a/docs/sitemap.txt
+++ b/docs/sitemap.txt
@@ -44,6 +44,7 @@ index.md
RPM-module.md
Simd-module.md
Windows-module.md
+ Cuda-module.md
Java.md
Vala.md
D.md
diff --git a/mesonbuild/backend/backends.py b/mesonbuild/backend/backends.py
index a0326f3..ba5bd90 100644
--- a/mesonbuild/backend/backends.py
+++ b/mesonbuild/backend/backends.py
@@ -20,7 +20,7 @@ from .. import mesonlib
from .. import mlog
import json
import subprocess
-from ..mesonlib import MesonException, OrderedSet
+from ..mesonlib import MachineChoice, MesonException, OrderedSet
from ..mesonlib import classify_unity_sources
from ..mesonlib import File
from ..compilers import CompilerArgs, VisualStudioCCompiler
@@ -185,9 +185,14 @@ class Backend:
self.environment.coredata.base_options)
def get_compiler_options_for_target(self, target):
- return OptionOverrideProxy(target.option_overrides,
- # no code depends on builtins for now
- self.environment.coredata.compiler_options)
+ if self.environment.is_cross_build() and not target.is_cross:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
+
+ return OptionOverrideProxy(
+ target.option_overrides,
+ self.environment.coredata.compiler_options[for_machine])
def get_option_for_target(self, option_name, target):
if option_name in target.option_overrides:
@@ -574,10 +579,14 @@ class Backend:
# Add compile args added using add_global_arguments()
# These override per-project arguments
commands += self.build.get_global_args(compiler, target.is_cross)
+ if self.environment.is_cross_build() and not target.is_cross:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
if not target.is_cross:
# Compile args added from the env: CFLAGS/CXXFLAGS, etc. We want these
# to override all the defaults, but not the per-target compile args.
- commands += self.environment.coredata.get_external_args(compiler.get_language())
+ commands += self.environment.coredata.get_external_args(for_machine, compiler.get_language())
# Always set -fPIC for shared libraries
if isinstance(target, build.SharedLibrary):
commands += compiler.get_pic_args()
diff --git a/mesonbuild/backend/ninjabackend.py b/mesonbuild/backend/ninjabackend.py
index e6edf8b..66d42b0 100644
--- a/mesonbuild/backend/ninjabackend.py
+++ b/mesonbuild/backend/ninjabackend.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+from typing import List
import os
import re
import shlex
@@ -29,9 +29,9 @@ from .. import build
from .. import mlog
from .. import dependencies
from .. import compilers
-from ..compilers import CompilerArgs, CCompiler, VisualStudioCCompiler
+from ..compilers import CompilerArgs, CCompiler, VisualStudioCCompiler, FortranCompiler
from ..linkers import ArLinker
-from ..mesonlib import File, MesonException, OrderedSet
+from ..mesonlib import File, MachineChoice, MesonException, OrderedSet
from ..mesonlib import get_compiler_for_source, has_path_sep
from .backends import CleanTrees
from ..build import InvalidArguments
@@ -1461,7 +1461,7 @@ int dummy;
or langname == 'cs':
continue
crstr = ''
- cross_args = self.environment.properties.host.get_external_link_args(langname)
+ cross_args = self.environment.coredata.get_external_link_args(MachineChoice.HOST, langname)
if is_cross:
crstr = '_CROSS'
rule = 'rule %s%s_LINKER\n' % (langname, crstr)
@@ -1827,7 +1827,8 @@ rule FORTRAN_DEP_HACK%s
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
- modre = re.compile(r"\bmodule\s+(\w+)\s*$", re.IGNORECASE)
+ modre = re.compile(r"\s*\bmodule\b\s+(\w+)\s*$", re.IGNORECASE)
+ submodre = re.compile(r"\s*\bsubmodule\b\s+\((\w+:?\w+)\)\s+(\w+)\s*$", re.IGNORECASE)
module_files = {}
for s in target.get_sources():
# FIXME, does not work for Fortran sources generated by
@@ -1850,11 +1851,23 @@ rule FORTRAN_DEP_HACK%s
'two files %s and %s.' %
(modname, module_files[modname], s))
module_files[modname] = s
+ else:
+ submodmatch = submodre.match(line)
+ if submodmatch is not None:
+ submodname = submodmatch.group(2).lower()
+ if submodname in module_files:
+ raise InvalidArguments(
+ 'Namespace collision: submodule %s defined in '
+ 'two files %s and %s.' %
+ (submodname, module_files[submodname], s))
+ module_files[submodname] = s
+
self.fortran_deps[target.get_basename()] = module_files
- def get_fortran_deps(self, compiler, src, target):
+ def get_fortran_deps(self, compiler: FortranCompiler, src: str, target) -> List[str]:
mod_files = []
usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
+ submodre = re.compile(r"\s*\bsubmodule\b\s+\((\w+:?\w+)\)\s+(\w+)\s*$", re.IGNORECASE)
dirname = self.get_target_private_dir(target)
tdeps = self.fortran_deps[target.get_basename()]
with open(src) as f:
@@ -1880,9 +1893,23 @@ rule FORTRAN_DEP_HACK%s
# the same name.
if mod_source_file.fname == os.path.basename(src):
continue
- mod_name = compiler.module_name_to_filename(
- usematch.group(1))
+ mod_name = compiler.module_name_to_filename(usename)
mod_files.append(os.path.join(dirname, mod_name))
+ else:
+ submodmatch = submodre.match(line)
+ if submodmatch is not None:
+ parents = submodmatch.group(1).lower().split(':')
+ assert len(parents) in (1, 2), (
+ 'submodule ancestry must be specified as'
+                            ' ancestor:parent but Meson found {}'.format(parents))
+ for parent in parents:
+ if parent not in tdeps:
+ raise MesonException("submodule {} relies on parent module {} that was not found.".format(submodmatch.group(2).lower(), parent))
+ if tdeps[parent].fname == os.path.basename(src): # same file
+ continue
+ mod_name = compiler.module_name_to_filename(parent)
+ mod_files.append(os.path.join(dirname, mod_name))
+
return mod_files
def get_cross_stdlib_args(self, target, compiler):
@@ -2480,6 +2507,11 @@ rule FORTRAN_DEP_HACK%s
if not isinstance(target, build.StaticLibrary):
commands += self.get_link_whole_args(linker, target)
+ if self.environment.is_cross_build() and not target.is_cross:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
+
if not isinstance(target, build.StaticLibrary):
# Add link args added using add_project_link_arguments()
commands += self.build.get_project_link_args(linker, target.subproject, target.is_cross)
@@ -2489,7 +2521,7 @@ rule FORTRAN_DEP_HACK%s
if not target.is_cross:
# Link args added from the env: LDFLAGS. We want these to
# override all the defaults but not the per-target link args.
- commands += self.environment.coredata.get_external_link_args(linker.get_language())
+ commands += self.environment.coredata.get_external_link_args(for_machine, linker.get_language())
# Now we will add libraries and library paths from various sources
@@ -2535,7 +2567,7 @@ rule FORTRAN_DEP_HACK%s
# to be after all internal and external libraries so that unresolved
# symbols from those can be found here. This is needed when the
# *_winlibs that we want to link to are static mingw64 libraries.
- commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
+ commands += linker.get_option_link_args(self.environment.coredata.compiler_options[for_machine])
dep_targets = []
dep_targets.extend(self.guess_external_link_dependencies(linker, target, commands, internal))
diff --git a/mesonbuild/backend/vs2010backend.py b/mesonbuild/backend/vs2010backend.py
index 783ae64..074c3a9 100644
--- a/mesonbuild/backend/vs2010backend.py
+++ b/mesonbuild/backend/vs2010backend.py
@@ -25,7 +25,9 @@ from .. import dependencies
from .. import mlog
from .. import compilers
from ..compilers import CompilerArgs
-from ..mesonlib import MesonException, File, python_command, replace_if_different
+from ..mesonlib import (
+ MesonException, MachineChoice, File, python_command, replace_if_different
+)
from ..environment import Environment, build_filename
def autodetect_vs_version(build):
@@ -878,10 +880,14 @@ class Vs2010Backend(backends.Backend):
file_inc_dirs = dict((lang, []) for lang in target.compilers)
# The order in which these compile args are added must match
# generate_single_compile() and generate_basic_compiler_args()
+ if self.environment.is_cross_build() and not target.is_cross:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
for l, comp in target.compilers.items():
if l in file_args:
file_args[l] += compilers.get_base_compile_args(self.get_base_options_for_target(target), comp)
- file_args[l] += comp.get_option_compile_args(self.environment.coredata.compiler_options)
+ file_args[l] += comp.get_option_compile_args(self.environment.coredata.compiler_options[for_machine])
# Add compile args added using add_project_arguments()
for l, args in self.build.projects_args.get(target.subproject, {}).items():
@@ -893,9 +899,10 @@ class Vs2010Backend(backends.Backend):
if l in file_args:
file_args[l] += args
if not target.is_cross:
- # Compile args added from the env: CFLAGS/CXXFLAGS, etc. We want these
- # to override all the defaults, but not the per-target compile args.
- for key, opt in self.environment.coredata.compiler_options.items():
+ # Compile args added from the env or cross file: CFLAGS/CXXFLAGS,
+ # etc. We want these to override all the defaults, but not the
+ # per-target compile args.
+ for key, opt in self.environment.coredata.compiler_options[for_machine].items():
l, suffix = key.split('_', 1)
if suffix == 'args' and l in file_args:
file_args[l] += opt.value
@@ -1054,9 +1061,10 @@ class Vs2010Backend(backends.Backend):
# These override per-project link arguments
extra_link_args += self.build.get_global_link_args(compiler, target.is_cross)
if not target.is_cross:
- # Link args added from the env: LDFLAGS. We want these to
- # override all the defaults but not the per-target link args.
- extra_link_args += self.environment.coredata.get_external_link_args(compiler.get_language())
+ # Link args added from the env: LDFLAGS, or the cross file. We
+ # want these to override all the defaults but not the
+ # per-target link args.
+ extra_link_args += self.environment.coredata.get_external_link_args(for_machine, compiler.get_language())
# Only non-static built targets need link args and link dependencies
extra_link_args += target.link_args
# External deps must be last because target link libraries may depend on them.
@@ -1079,7 +1087,7 @@ class Vs2010Backend(backends.Backend):
# to be after all internal and external libraries so that unresolved
# symbols from those can be found here. This is needed when the
# *_winlibs that we want to link to are static mingw64 libraries.
- extra_link_args += compiler.get_option_link_args(self.environment.coredata.compiler_options)
+ extra_link_args += compiler.get_option_link_args(self.environment.coredata.compiler_options[for_machine])
(additional_libpaths, additional_links, extra_link_args) = self.split_link_args(extra_link_args.to_native())
# Add more libraries to be linked if needed
@@ -1153,7 +1161,7 @@ class Vs2010Backend(backends.Backend):
ET.SubElement(meson_file_group, 'None', Include=os.path.join(proj_to_src_dir, build_filename))
extra_files = target.extra_files
- if len(headers) + len(gen_hdrs) + len(extra_files) > 0:
+ if len(headers) + len(gen_hdrs) + len(extra_files) + len(pch_sources) > 0:
inc_hdrs = ET.SubElement(root, 'ItemGroup')
for h in headers:
relpath = os.path.join(down, h.rel_to_builddir(self.build_to_src))
@@ -1163,6 +1171,9 @@ class Vs2010Backend(backends.Backend):
for h in target.extra_files:
relpath = os.path.join(down, h.rel_to_builddir(self.build_to_src))
ET.SubElement(inc_hdrs, 'CLInclude', Include=relpath)
+ for lang in pch_sources:
+ h = pch_sources[lang][0]
+ ET.SubElement(inc_hdrs, 'CLInclude', Include=os.path.join(proj_to_src_dir, h))
if len(sources) + len(gen_src) + len(pch_sources) > 0:
inc_src = ET.SubElement(root, 'ItemGroup')
diff --git a/mesonbuild/build.py b/mesonbuild/build.py
index 7909613..702b338 100644
--- a/mesonbuild/build.py
+++ b/mesonbuild/build.py
@@ -36,6 +36,7 @@ pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
+ 'cuda_args',
'd_args',
'd_import_dirs',
'd_unittest',
@@ -797,13 +798,13 @@ just like those detected with the dependency() function.''')
for linktarget in lwhole:
self.link_whole(linktarget)
- c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
- = extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
+ c_pchlist, cpp_pchlist, clist, cpplist, cudalist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
+ = extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cuda_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
- compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
+ compiler_args = {'c': clist, 'cpp': cpplist, 'cuda': cudalist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
diff --git a/mesonbuild/compilers/c.py b/mesonbuild/compilers/c.py
index a591183..b47be7d 100644
--- a/mesonbuild/compilers/c.py
+++ b/mesonbuild/compilers/c.py
@@ -25,9 +25,9 @@ from .. import mlog
from .. import coredata
from . import compilers
from ..mesonlib import (
- EnvironmentException, MesonException, version_compare, Popen_safe, listify,
- for_windows, for_darwin, for_cygwin, for_haiku, for_openbsd,
- darwin_get_object_archs
+ EnvironmentException, MachineChoice, MesonException, Popen_safe, listify,
+ version_compare, for_windows, for_darwin, for_cygwin, for_haiku,
+ for_openbsd, darwin_get_object_archs
)
from .c_function_attributes import C_FUNC_ATTRIBUTES
@@ -427,12 +427,16 @@ class CCompiler(Compiler):
# Read c_args/cpp_args/etc from the cross-info file (if needed)
args += self.get_cross_extra_flags(env, link=(mode == 'link'))
if not self.is_cross:
+ if env.is_cross_build() and not self.is_cross:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
if mode == 'preprocess':
# Add CPPFLAGS from the env.
- args += env.coredata.get_external_preprocess_args(self.language)
+ args += env.coredata.get_external_preprocess_args(for_machine, self.language)
elif mode == 'compile':
# Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env
- sys_args = env.coredata.get_external_args(self.language)
+ sys_args = env.coredata.get_external_args(for_machine, self.language)
# Apparently it is a thing to inject linker flags both
# via CFLAGS _and_ LDFLAGS, even though the former are
# also used during linking. These flags can break
@@ -441,7 +445,7 @@ class CCompiler(Compiler):
args += cleaned_sys_args
elif mode == 'link':
# Add LDFLAGS from the env
- args += env.coredata.get_external_link_args(self.language)
+ args += env.coredata.get_external_link_args(for_machine, self.language)
args += self.get_compiler_check_args()
# extra_args must override all other arguments, so we add them last
args += extra_args
@@ -1081,7 +1085,11 @@ class CCompiler(Compiler):
commands = self.get_exelist() + ['-v', '-E', '-']
commands += self.get_always_args()
# Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env
- commands += env.coredata.get_external_args(self.language)
+ if env.is_cross_build() and not self.is_cross:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
+ commands += env.coredata.get_external_args(for_machine, self.language)
mlog.debug('Finding framework path by running: ', ' '.join(commands), '\n')
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
diff --git a/mesonbuild/compilers/compilers.py b/mesonbuild/compilers/compilers.py
index 317d91a..9a101bf 100644
--- a/mesonbuild/compilers/compilers.py
+++ b/mesonbuild/compilers/compilers.py
@@ -21,8 +21,8 @@ from .. import coredata
from .. import mlog
from .. import mesonlib
from ..mesonlib import (
- EnvironmentException, MesonException, OrderedSet, version_compare,
- Popen_safe
+ EnvironmentException, MachineChoice, MesonException, OrderedSet,
+ version_compare, Popen_safe
)
"""This file contains the data files of all compilers Meson knows
@@ -1011,7 +1011,11 @@ class Compiler:
opts = {} # build afresh every time
# Take default values from env variables.
- compile_args, link_args = self.get_args_from_envvars()
+ if not self.is_cross:
+ compile_args, link_args = self.get_args_from_envvars()
+ else:
+ compile_args = []
+ link_args = []
description = 'Extra arguments passed to the {}'.format(self.get_display_language())
opts.update({
self.language + '_args': coredata.UserArrayOption(
@@ -1083,10 +1087,9 @@ class Compiler:
def get_cross_extra_flags(self, environment, link):
extra_flags = []
if self.is_cross and environment:
- props = environment.properties.host
- extra_flags += props.get_external_args(self.language)
+ extra_flags += environment.coredata.get_external_args(MachineChoice.HOST, self.language)
if link:
- extra_flags += props.get_external_link_args(self.language)
+ extra_flags += environment.coredata.get_external_link_args(MachineChoice.HOST, self.language)
return extra_flags
def _get_compile_output(self, dirname, mode):
diff --git a/mesonbuild/compilers/cs.py b/mesonbuild/compilers/cs.py
index cbfcd9c..cd67da0 100644
--- a/mesonbuild/compilers/cs.py
+++ b/mesonbuild/compilers/cs.py
@@ -32,6 +32,7 @@ class CsCompiler(Compiler):
self.language = 'cs'
super().__init__(exelist, version)
self.id = id
+ self.is_cross = False
self.runner = runner
def get_display_language(self):
diff --git a/mesonbuild/compilers/d.py b/mesonbuild/compilers/d.py
index 3065ac7..40906c5 100644
--- a/mesonbuild/compilers/d.py
+++ b/mesonbuild/compilers/d.py
@@ -14,7 +14,9 @@
import os.path, subprocess
-from ..mesonlib import EnvironmentException, version_compare, is_windows, is_osx
+from ..mesonlib import (
+ EnvironmentException, MachineChoice, version_compare, is_windows, is_osx
+)
from .compilers import (
CompilerType,
@@ -306,12 +308,17 @@ class DCompiler(Compiler):
# Add link flags needed to find dependencies
args += d.get_link_args()
+ if env.is_cross_build() and not self.is_cross:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
+
if mode == 'compile':
# Add DFLAGS from the env
- args += env.coredata.get_external_args(self.language)
+ args += env.coredata.get_external_args(for_machine, self.language)
elif mode == 'link':
# Add LDFLAGS from the env
- args += env.coredata.get_external_link_args(self.language)
+ args += env.coredata.get_external_link_args(for_machine, self.language)
# extra_args must override all other arguments, so we add them last
args += extra_args
return args
@@ -373,7 +380,7 @@ class DCompiler(Compiler):
# translate library link flag
dcargs.append('-L=' + arg)
continue
- elif arg.startswith('-L'):
+ elif arg.startswith('-L/') or arg.startswith('-L./'):
# we need to handle cases where -L is set by e.g. a pkg-config
# setting to select a linker search path. We can however not
# unconditionally prefix '-L' with '-L' because the user might
diff --git a/mesonbuild/compilers/fortran.py b/mesonbuild/compilers/fortran.py
index eea1660..8c50736 100644
--- a/mesonbuild/compilers/fortran.py
+++ b/mesonbuild/compilers/fortran.py
@@ -179,7 +179,7 @@ class FortranCompiler(Compiler):
return parameter_list
- def module_name_to_filename(self, module_name):
+ def module_name_to_filename(self, module_name: str) -> str:
return module_name.lower() + '.mod'
def get_std_shared_lib_link_args(self):
diff --git a/mesonbuild/compilers/java.py b/mesonbuild/compilers/java.py
index 03ee382..5d7f865 100644
--- a/mesonbuild/compilers/java.py
+++ b/mesonbuild/compilers/java.py
@@ -23,6 +23,7 @@ class JavaCompiler(Compiler):
self.language = 'java'
super().__init__(exelist, version)
self.id = 'unknown'
+ self.is_cross = False
self.javarunner = 'java'
def get_soname_args(self, *args):
diff --git a/mesonbuild/compilers/vala.py b/mesonbuild/compilers/vala.py
index e64d57f..5303298 100644
--- a/mesonbuild/compilers/vala.py
+++ b/mesonbuild/compilers/vala.py
@@ -49,6 +49,12 @@ class ValaCompiler(Compiler):
def get_pic_args(self):
return []
+ def get_pie_args(self):
+ return []
+
+ def get_pie_link_args(self):
+ return []
+
def get_always_args(self):
return ['-C']
diff --git a/mesonbuild/coredata.py b/mesonbuild/coredata.py
index b2b9e91..3ce272e 100644
--- a/mesonbuild/coredata.py
+++ b/mesonbuild/coredata.py
@@ -19,7 +19,8 @@ from itertools import chain
from pathlib import PurePath
from collections import OrderedDict
from .mesonlib import (
- MesonException, default_libdir, default_libexecdir, default_prefix
+ MesonException, MachineChoice, PerMachine,
+ default_libdir, default_libexecdir, default_prefix
)
from .wrap import WrapMode
import ast
@@ -261,9 +262,9 @@ class CoreData:
self.init_builtins()
self.backend_options = {}
self.user_options = {}
- self.compiler_options = {}
+ self.compiler_options = PerMachine({}, {}, {})
self.base_options = {}
- self.external_preprocess_args = {} # CPPFLAGS only
+ self.external_preprocess_args = PerMachine({}, {}, {}) # CPPFLAGS only
self.cross_file = self.__load_cross_file(options.cross_file)
self.compilers = OrderedDict()
self.cross_compilers = OrderedDict()
@@ -457,16 +458,18 @@ class CoreData:
mode = 'custom'
self.builtins['buildtype'].set_value(mode)
+ def get_all_compiler_options(self):
+ # TODO think about cross and command-line interface. (Only .build is mentioned here.)
+ yield self.compiler_options.build
+
def _get_all_nonbuiltin_options(self):
yield self.backend_options
yield self.user_options
- yield self.compiler_options
+ yield from self.get_all_compiler_options()
yield self.base_options
def get_all_options(self):
- return chain(
- iter([self.builtins]),
- self._get_all_nonbuiltin_options())
+ return chain([self.builtins], self._get_all_nonbuiltin_options())
def validate_option_value(self, option_name, override_value):
for opts in self.get_all_options():
@@ -475,14 +478,14 @@ class CoreData:
return opt.validate_value(override_value)
raise MesonException('Tried to validate unknown option %s.' % option_name)
- def get_external_args(self, lang):
- return self.compiler_options[lang + '_args'].value
+ def get_external_args(self, for_machine: MachineChoice, lang):
+ return self.compiler_options[for_machine][lang + '_args'].value
- def get_external_link_args(self, lang):
- return self.compiler_options[lang + '_link_args'].value
+ def get_external_link_args(self, for_machine: MachineChoice, lang):
+ return self.compiler_options[for_machine][lang + '_link_args'].value
- def get_external_preprocess_args(self, lang):
- return self.external_preprocess_args[lang]
+ def get_external_preprocess_args(self, for_machine: MachineChoice, lang):
+ return self.external_preprocess_args[for_machine][lang]
def merge_user_options(self, options):
for (name, value) in options.items():
@@ -493,7 +496,7 @@ class CoreData:
if type(oldval) != type(value):
self.user_options[name] = value
- def set_options(self, options, subproject=''):
+ def set_options(self, options, subproject='', warn_unknown=True):
# Set prefix first because it's needed to sanitize other options
prefix = self.builtins['prefix'].value
if 'prefix' in options:
@@ -517,8 +520,7 @@ class CoreData:
break
else:
unknown_options.append(k)
-
- if unknown_options:
+ if unknown_options and warn_unknown:
unknown_options = ', '.join(sorted(unknown_options))
sub = 'In subproject {}: '.format(subproject) if subproject else ''
mlog.warning('{}Unknown options: "{}"'.format(sub, unknown_options))
@@ -553,36 +555,54 @@ class CoreData:
self.set_options(options, subproject)
- def process_new_compilers(self, lang: str, comp, cross_comp, cmd_line_options):
+ def process_new_compilers(self, lang: str, comp, cross_comp, env):
from . import compilers
+
self.compilers[lang] = comp
- # Native compiler always exist so always add its options.
- new_options = comp.get_options()
if cross_comp is not None:
self.cross_compilers[lang] = cross_comp
- new_options.update(cross_comp.get_options())
+
+ # Native compiler always exist so always add its options.
+ new_options_for_build = comp.get_options()
+ preproc_flags_for_build = comp.get_preproc_flags()
+ if cross_comp is not None:
+ new_options_for_host = cross_comp.get_options()
+ preproc_flags_for_host = cross_comp.get_preproc_flags()
+ else:
+ new_options_for_host = comp.get_options()
+ preproc_flags_for_host = comp.get_preproc_flags()
+
+ opts_machines_list = [
+ (new_options_for_build, preproc_flags_for_build, MachineChoice.BUILD),
+ (new_options_for_host, preproc_flags_for_host, MachineChoice.HOST),
+ ]
optprefix = lang + '_'
- for k, o in new_options.items():
- if not k.startswith(optprefix):
- raise MesonException('Internal error, %s has incorrect prefix.' % k)
- if k in cmd_line_options:
- o.set_value(cmd_line_options[k])
- self.compiler_options.setdefault(k, o)
-
- # Unlike compiler and linker flags, preprocessor flags are not in
- # compiler_options because they are not visible to user.
- preproc_flags = comp.get_preproc_flags()
- preproc_flags = shlex.split(preproc_flags)
- self.external_preprocess_args.setdefault(lang, preproc_flags)
+ for new_options, preproc_flags, for_machine in opts_machines_list:
+ for k, o in new_options.items():
+ if not k.startswith(optprefix):
+ raise MesonException('Internal error, %s has incorrect prefix.' % k)
+ if k in env.properties[for_machine]:
+ # Get from configuration files.
+ o.set_value(env.properties[for_machine][k])
+ if (env.machines.matches_build_machine(for_machine) and
+ k in env.cmd_line_options):
+ # TODO think about cross and command-line interface.
+ o.set_value(env.cmd_line_options[k])
+ self.compiler_options[for_machine].setdefault(k, o)
+
+ # Unlike compiler and linker flags, preprocessor flags are not in
+ # compiler_options because they are not visible to user.
+ preproc_flags = shlex.split(preproc_flags)
+ self.external_preprocess_args[for_machine].setdefault(lang, preproc_flags)
enabled_opts = []
for optname in comp.base_options:
if optname in self.base_options:
continue
oobj = compilers.base_options[optname]
- if optname in cmd_line_options:
- oobj.set_value(cmd_line_options[optname])
+ if optname in env.cmd_line_options:
+ oobj.set_value(env.cmd_line_options[optname])
enabled_opts.append(optname)
self.base_options[optname] = oobj
self.emit_base_options_warnings(enabled_opts)
diff --git a/mesonbuild/dependencies/base.py b/mesonbuild/dependencies/base.py
index 9da0d7c..8196124 100644
--- a/mesonbuild/dependencies/base.py
+++ b/mesonbuild/dependencies/base.py
@@ -1674,9 +1674,9 @@ class DubDependency(ExternalDependency):
return ''
# Ex.: library-debug-linux.posix-x86_64-ldc_2081-EF934983A3319F8F8FF2F0E107A363BA
- build_name = 'library-{}-{}-{}-{}_{}'.format(description['buildType'], '.'.join(description['platform']), '.'.join(description['architecture']), comp, d_ver)
+ build_name = '-{}-{}-{}-{}_{}'.format(description['buildType'], '.'.join(description['platform']), '.'.join(description['architecture']), comp, d_ver)
for entry in os.listdir(module_build_path):
- if entry.startswith(build_name):
+ if build_name in entry:
for file in os.listdir(os.path.join(module_build_path, entry)):
if file == lib_file_name:
if folder_only:
diff --git a/mesonbuild/environment.py b/mesonbuild/environment.py
index 3fb93ca..b23509a 100644
--- a/mesonbuild/environment.py
+++ b/mesonbuild/environment.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import configparser, os, platform, re, sys, shlex, shutil, subprocess
+import configparser, os, platform, re, sys, shlex, shutil, subprocess, typing
from . import coredata
from .linkers import ArLinker, ArmarLinker, VisualStudioLinker, DLinker, CcrxLinker
@@ -766,7 +766,22 @@ class Environment:
except OSError as e:
popen_exceptions[' '.join(compiler + [arg])] = e
continue
- version = search_version(out)
+ # Example nvcc printout:
+ #
+ # nvcc: NVIDIA (R) Cuda compiler driver
+ # Copyright (c) 2005-2018 NVIDIA Corporation
+ # Built on Sat_Aug_25_21:08:01_CDT_2018
+ # Cuda compilation tools, release 10.0, V10.0.130
+ #
+ # search_version() first finds the "10.0" after "release",
+ # rather than the more precise "10.0.130" after "V".
+ # The patch version number is occasionally important; For
+ # instance, on Linux,
+ # - CUDA Toolkit 8.0.44 requires NVIDIA Driver 367.48
+ # - CUDA Toolkit 8.0.61 requires NVIDIA Driver 375.26
+ # Luckily, the "V" also makes it very simple to extract
+ # the full version:
+ version = out.strip().split('V')[-1]
cls = CudaCompiler
return cls(ccache + compiler, version, is_cross, exe_wrap)
raise EnvironmentException('Could not find suitable CUDA compiler: "' + ' '.join(compilers) + '"')
@@ -1086,7 +1101,7 @@ class Environment:
def detect_compilers(self, lang: str, need_cross_compiler: bool):
(comp, cross_comp) = self.compilers_from_language(lang, need_cross_compiler)
if comp is not None:
- self.coredata.process_new_compilers(lang, comp, cross_comp, self.cmd_line_options)
+ self.coredata.process_new_compilers(lang, comp, cross_comp, self)
return comp, cross_comp
def detect_static_linker(self, compiler):
@@ -1268,14 +1283,10 @@ class MesonConfigFile:
return out
class Properties:
- def __init__(self):
- self.properties = {}
-
- def get_external_args(self, language):
- return mesonlib.stringlistify(self.properties.get(language + '_args', []))
-
- def get_external_link_args(self, language):
- return mesonlib.stringlistify(self.properties.get(language + '_link_args', []))
+ def __init__(
+ self,
+ properties: typing.Optional[typing.Dict[str, typing.Union[str, typing.List[str]]]] = None):
+ self.properties = properties or {}
def has_stdlib(self, language):
return language + '_stdlib' in self.properties
@@ -1289,6 +1300,11 @@ class Properties:
def get_sys_root(self):
return self.properties.get('sys_root', None)
+ def __eq__(self, other):
+ if isinstance(other, type(self)):
+ return self.properties == other.properties
+ return NotImplemented
+
# TODO consider removing so Properties is less freeform
def __getitem__(self, key):
return self.properties[key]
diff --git a/mesonbuild/interpreter.py b/mesonbuild/interpreter.py
index fb4c468..2eb0720 100644
--- a/mesonbuild/interpreter.py
+++ b/mesonbuild/interpreter.py
@@ -36,6 +36,7 @@ import os, shutil, uuid
import re, shlex
import subprocess
from collections import namedtuple
+from itertools import chain
from pathlib import PurePath
import functools
@@ -998,8 +999,13 @@ class CompilerHolder(InterpreterObject):
idir = os.path.join(self.environment.get_source_dir(),
i.held_object.get_curdir(), idir)
args += self.compiler.get_include_args(idir, False)
+ native = kwargs.get('native', None)
+ if native:
+ for_machine = MachineChoice.BUILD
+ else:
+ for_machine = MachineChoice.HOST
if not nobuiltins:
- opts = self.environment.coredata.compiler_options
+ opts = self.environment.coredata.compiler_options[for_machine]
args += self.compiler.get_option_compile_args(opts)
if mode == 'link':
args += self.compiler.get_option_link_args(opts)
@@ -2454,8 +2460,9 @@ external dependencies (including libraries) must go to "dependencies".''')
def get_option_internal(self, optname):
# Some base options are not defined in some environments, return the
# default value from compilers.base_options in that case.
- for d in [self.coredata.base_options, compilers.base_options,
- self.coredata.builtins, self.coredata.compiler_options]:
+ for d in chain(
+ [self.coredata.base_options, compilers.base_options, self.coredata.builtins],
+ self.coredata.get_all_compiler_options()):
try:
return d[optname]
except KeyError:
diff --git a/mesonbuild/mconf.py b/mesonbuild/mconf.py
index b8fb3c6..48f88e8 100644
--- a/mesonbuild/mconf.py
+++ b/mesonbuild/mconf.py
@@ -139,7 +139,8 @@ class Conf:
self.print_options('Core options', core_options)
self.print_options('Backend options', self.coredata.backend_options)
self.print_options('Base options', self.coredata.base_options)
- self.print_options('Compiler options', self.coredata.compiler_options)
+ # TODO others
+ self.print_options('Compiler options', self.coredata.compiler_options.build)
self.print_options('Directories', dir_options)
self.print_options('Project options', self.coredata.user_options)
self.print_options('Testing options', test_options)
@@ -154,6 +155,9 @@ def run(options):
save = False
if len(options.cmd_line_options) > 0:
c.set_options(options.cmd_line_options)
+ if not c.build.environment.is_cross_build():
+ # TODO think about cross and command-line interface.
+ c.coredata.compiler_options.host = c.coredata.compiler_options.build
coredata.update_cmd_line_file(builddir, options)
save = True
elif options.clearcache:
diff --git a/mesonbuild/mintro.py b/mesonbuild/mintro.py
index 074c70a..5eecb67 100644
--- a/mesonbuild/mintro.py
+++ b/mesonbuild/mintro.py
@@ -204,7 +204,8 @@ def list_buildoptions(coredata: cdata.CoreData):
add_keys(optlist, core_options, 'core')
add_keys(optlist, coredata.backend_options, 'backend')
add_keys(optlist, coredata.base_options, 'base')
- add_keys(optlist, coredata.compiler_options, 'compiler')
+ # TODO others
+ add_keys(optlist, coredata.compiler_options.build, 'compiler')
add_keys(optlist, dir_options, 'directory')
add_keys(optlist, coredata.user_options, 'user')
add_keys(optlist, test_options, 'test')
diff --git a/mesonbuild/modules/__init__.py b/mesonbuild/modules/__init__.py
index 6b6aa8b..2df4d7c 100644
--- a/mesonbuild/modules/__init__.py
+++ b/mesonbuild/modules/__init__.py
@@ -58,6 +58,10 @@ class GResourceHeaderTarget(build.CustomTarget):
def __init__(self, name, subdir, subproject, kwargs):
super().__init__(name, subdir, subproject, kwargs)
+class GResourceObjectTarget(build.CustomTarget):
+ def __init__(self, name, subdir, subproject, kwargs):
+ super().__init__(name, subdir, subproject, kwargs)
+
class GirTarget(build.CustomTarget):
def __init__(self, name, subdir, subproject, kwargs):
super().__init__(name, subdir, subproject, kwargs)
diff --git a/mesonbuild/modules/gnome.py b/mesonbuild/modules/gnome.py
index 871cd48..4473bcb 100644
--- a/mesonbuild/modules/gnome.py
+++ b/mesonbuild/modules/gnome.py
@@ -16,6 +16,8 @@
functionality such as gobject-introspection, gresources and gtk-doc'''
import os
+import re
+import sys
import copy
import shlex
import subprocess
@@ -25,11 +27,13 @@ from .. import mlog
from .. import mesonlib
from .. import compilers
from .. import interpreter
-from . import GResourceTarget, GResourceHeaderTarget, GirTarget, TypelibTarget, VapiTarget
+from . import GResourceTarget, GResourceHeaderTarget, GResourceObjectTarget, GirTarget, TypelibTarget, VapiTarget
from . import get_include_args
from . import ExtensionModule
from . import ModuleReturnValue
-from ..mesonlib import MesonException, OrderedSet, Popen_safe, extract_as_list
+from ..mesonlib import (
+ MachineChoice, MesonException, OrderedSet, Popen_safe, extract_as_list
+)
from ..dependencies import Dependency, PkgConfigDependency, InternalDependency
from ..interpreterbase import noKwargs, permittedKwargs, FeatureNew, FeatureNewKwargs
@@ -40,6 +44,8 @@ from ..interpreterbase import noKwargs, permittedKwargs, FeatureNew, FeatureNewK
# https://bugzilla.gnome.org/show_bug.cgi?id=774368
gresource_dep_needed_version = '>= 2.51.1'
+gresource_ld_binary_needed_version = '>= 2.60'
+
native_glib_version = None
girwarning_printed = False
gdbuswarning_printed = False
@@ -164,7 +170,10 @@ class GnomeModule(ExtensionModule):
cmd += ['--sourcedir', source_dir]
if 'c_name' in kwargs:
- cmd += ['--c-name', kwargs.pop('c_name')]
+ c_name = kwargs.pop('c_name')
+ cmd += ['--c-name', c_name]
+ else:
+ c_name = None
export = kwargs.pop('export', False)
if not export:
cmd += ['--internal']
@@ -173,13 +182,19 @@ class GnomeModule(ExtensionModule):
cmd += mesonlib.stringlistify(kwargs.pop('extra_args', []))
+ gresource_ld_binary = False
+ if mesonlib.is_linux() and mesonlib.version_compare(glib_version, gresource_ld_binary_needed_version) and not state.environment.is_cross_build():
+ ld_obj = self.interpreter.find_program_impl('ld', required=False)
+ if ld_obj.found():
+ gresource_ld_binary = True
+
gresource = kwargs.pop('gresource_bundle', False)
- if gresource:
- output = args[0] + '.gresource'
- name = args[0] + '_gresource'
- else:
- output = args[0] + '.c'
- name = args[0] + '_c'
+ if gresource or gresource_ld_binary:
+ g_output = args[0] + '.gresource'
+ g_name = args[0] + '_gresource'
+
+ output = args[0] + '.c'
+ name = args[0] + '_c'
if kwargs.get('install', False) and not gresource:
raise MesonException('The install kwarg only applies to gresource bundles, see install_header')
@@ -193,18 +208,44 @@ class GnomeModule(ExtensionModule):
kwargs['input'] = args[1]
kwargs['output'] = output
kwargs['depends'] = depends
+ if gresource or gresource_ld_binary:
+ g_kwargs = copy.deepcopy(kwargs)
+ g_kwargs['input'] = args[1]
+ g_kwargs['output'] = g_output
+ g_kwargs['depends'] = depends
if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
# This will eventually go out of sync if dependencies are added
kwargs['depend_files'] = depend_files
- kwargs['command'] = cmd
+ if gresource_ld_binary:
+ kwargs['command'] = copy.copy(cmd) + ['--external-data']
+ else:
+ kwargs['command'] = cmd
+ if gresource or gresource_ld_binary:
+ # This will eventually go out of sync if dependencies are added
+ g_kwargs['depend_files'] = depend_files
+ g_kwargs['command'] = cmd
else:
depfile = kwargs['output'] + '.d'
- kwargs['depfile'] = depfile
- kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@']
- target_c = GResourceTarget(name, state.subdir, state.subproject, kwargs)
+ if gresource_ld_binary:
+ depfile2 = kwargs['output'] + '.2.d'
+ kwargs['depfile'] = depfile2
+ kwargs['command'] = copy.copy(cmd) + ['--external-data', '--dependency-file', '@DEPFILE@']
+ else:
+ kwargs['depfile'] = depfile
+ kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@']
+ if gresource or gresource_ld_binary:
+ g_kwargs['depfile'] = depfile
+ g_kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@']
+
+ if gresource or gresource_ld_binary:
+ target_g = GResourceTarget(g_name, state.subdir, state.subproject, g_kwargs)
+ if gresource: # Only one target for .gresource files
+ if target_g.get_id() not in self.interpreter.build.targets:
+ return ModuleReturnValue(target_g, [target_g])
+ else:
+ return ModuleReturnValue(target_g, [])
- if gresource: # Only one target for .gresource files
- return ModuleReturnValue(target_c, [target_c])
+ target_c = GResourceTarget(name, state.subdir, state.subproject, kwargs)
h_kwargs = {
'command': cmd,
@@ -220,9 +261,99 @@ class GnomeModule(ExtensionModule):
h_kwargs['install_dir'] = kwargs.get('install_dir',
state.environment.coredata.get_builtin_option('includedir'))
target_h = GResourceHeaderTarget(args[0] + '_h', state.subdir, state.subproject, h_kwargs)
- rv = [target_c, target_h]
+
+ if gresource_ld_binary:
+ return self._create_gresource_ld_binary_targets(args, state, ifile, ld_obj, c_name, target_g, g_output, target_c, target_h)
+ else:
+ rv = [target_c, target_h]
+
return ModuleReturnValue(rv, rv)
+ def _create_gresource_ld_binary_targets(self, args, state, ifile, ld_obj, c_name, target_g, g_output, target_c, target_h):
+ if c_name is None:
+ # Create proper c identifier from filename in the way glib-compile-resources does
+ c_name = os.path.basename(ifile).partition('.')[0]
+ c_name = c_name.replace('-', '_')
+ c_name = re.sub(r'^([^(_a-zA-Z)])+', '', c_name)
+ c_name = re.sub(r'([^(_a-zA-Z0-9)])', '', c_name)
+
+ c_name_no_underscores = re.sub(r'^_+', '', c_name)
+
+ ld = ld_obj.get_command()
+ objcopy_object = self.interpreter.find_program_impl('objcopy', required=False)
+ if objcopy_object.found():
+ objcopy = objcopy_object.get_command()
+ else:
+ objcopy = None
+
+ o_kwargs = {
+ 'command': [ld, '-r', '-b', 'binary', '@INPUT@', '-o', '@OUTPUT@'],
+ 'input': target_g,
+ 'output': args[0] + '1.o'
+ }
+
+ target_o = GResourceObjectTarget(args[0] + '1_o', state.subdir, state.subproject, o_kwargs)
+
+ builddir = os.path.join(state.environment.get_build_dir(), state.subdir)
+ linkerscript_name = args[0] + '_map.ld'
+ linkerscript_path = os.path.join(builddir, linkerscript_name)
+ linkerscript_file = open(linkerscript_path, 'w')
+
+ # Create symbol name the way bfd does
+ binary_name = os.path.join(state.subdir, g_output)
+ encoding = sys.getfilesystemencoding()
+ symbol_name = re.sub(rb'([^(_a-zA-Z0-9)])', b'_', binary_name.encode(encoding)).decode(encoding)
+
+ linkerscript_string = '''SECTIONS
+{{
+ .gresource.{} : ALIGN(8)
+ {{
+ {}_resource_data = _binary_{}_start;
+ }}
+ .data :
+ {{
+ *(.data)
+ }}
+}}'''.format(c_name_no_underscores, c_name, symbol_name)
+
+ linkerscript_file.write(linkerscript_string)
+
+ o2_kwargs = {
+ 'command': [ld, '-r', '-T', os.path.join(state.subdir, linkerscript_name), '@INPUT@', '-o', '@OUTPUT@'],
+ 'input': target_o,
+ 'output': args[0] + '2.o',
+ }
+ target_o2 = GResourceObjectTarget(args[0] + '2_o', state.subdir, state.subproject, o2_kwargs)
+
+ if objcopy is not None:
+ objcopy_cmd = [objcopy, '--set-section-flags', '.gresource.' + c_name + '=readonly,alloc,load,data']
+ objcopy_cmd += ['-N', '_binary_' + symbol_name + '_start']
+ objcopy_cmd += ['-N', '_binary_' + symbol_name + '_end']
+ objcopy_cmd += ['-N', '_binary_' + symbol_name + '_size']
+ objcopy_cmd += ['@INPUT@', '@OUTPUT@']
+
+ o3_kwargs = {
+ 'command': objcopy_cmd,
+ 'input': target_o2,
+ 'output': args[0] + '3.o'
+ }
+
+ target_o3 = GResourceObjectTarget(args[0] + '3_o', state.subdir, state.subproject, o3_kwargs)
+
+ rv1 = [target_c, target_h, target_o3]
+ if target_g.get_id() not in self.interpreter.build.targets:
+ rv2 = rv1 + [target_g, target_o, target_o2]
+ else:
+ rv2 = rv1 + [target_o, target_o2]
+ else:
+ rv1 = [target_c, target_h, target_o2]
+ if target_g.get_id() not in self.interpreter.build.targets:
+ rv2 = rv1 + [target_g, target_o]
+ else:
+ rv2 = rv1 + [target_o]
+
+ return ModuleReturnValue(rv1, rv2)
+
def _get_gresource_dependencies(self, state, input_file, source_dirs, dependencies):
cmd = ['glib-compile-resources',
@@ -531,11 +662,7 @@ class GnomeModule(ExtensionModule):
ret = []
for lang in langs:
- if state.environment.is_cross_build():
- link_args = state.environment.properties.host.get_external_link_args(lang)
- else:
- link_args = state.environment.coredata.get_external_link_args(lang)
-
+ link_args = state.environment.coredata.get_external_link_args(MachineChoice.HOST, lang)
for link_arg in link_args:
if link_arg.startswith('-L'):
ret.append(link_arg)
@@ -720,10 +847,7 @@ class GnomeModule(ExtensionModule):
def _get_external_args_for_langs(self, state, langs):
ret = []
for lang in langs:
- if state.environment.is_cross_build():
- ret += state.environment.properties.host.get_external_args(lang)
- else:
- ret += state.environment.coredata.get_external_args(lang)
+ ret += state.environment.coredata.get_external_args(MachineChoice.HOST, lang)
return ret
@staticmethod
@@ -1048,13 +1172,11 @@ This will become a hard error in the future.''')
ldflags.update(internal_ldflags)
ldflags.update(external_ldflags)
+ cflags.update(state.environment.coredata.get_external_args(MachineChoice.HOST, 'c'))
+ ldflags.update(state.environment.coredata.get_external_link_args(MachineChoice.HOST, 'c'))
if state.environment.is_cross_build():
- cflags.update(state.environment.properties.host.get_external_args('c'))
- ldflags.update(state.environment.properties.host.get_external_link_args('c'))
compiler = state.environment.coredata.cross_compilers.get('c')
else:
- cflags.update(state.environment.coredata.get_external_args('c'))
- ldflags.update(state.environment.coredata.get_external_link_args('c'))
compiler = state.environment.coredata.compilers.get('c')
compiler_flags = self._get_langs_compilers_flags(state, [('c', compiler)])
diff --git a/mesonbuild/modules/python.py b/mesonbuild/modules/python.py
index 1d41165..049c457 100644
--- a/mesonbuild/modules/python.py
+++ b/mesonbuild/modules/python.py
@@ -60,6 +60,7 @@ class PythonDependency(ExternalDependency):
self.pkgdep = None
self.variables = python_holder.variables
self.paths = python_holder.paths
+ self.link_libpython = python_holder.link_libpython
if mesonlib.version_compare(self.version, '>= 3.0'):
self.major_version = 3
else:
@@ -149,11 +150,11 @@ class PythonDependency(ExternalDependency):
libdirs = []
largs = self.clib_compiler.find_library(libname, environment, libdirs)
-
- self.is_found = largs is not None
- if self.is_found:
+ if largs is not None:
self.link_args = largs
+ self.is_found = largs is not None or not self.link_libpython
+
inc_paths = mesonlib.OrderedSet([
self.variables.get('INCLUDEPY'),
self.paths.get('include'),
diff --git a/mesonbuild/modules/unstable_cuda.py b/mesonbuild/modules/unstable_cuda.py
new file mode 100644
index 0000000..941b15a
--- /dev/null
+++ b/mesonbuild/modules/unstable_cuda.py
@@ -0,0 +1,259 @@
+# Copyright 2017 The Meson development team
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from ..mesonlib import version_compare
+from ..interpreter import CompilerHolder
+from ..compilers import CudaCompiler
+
+from . import ExtensionModule, ModuleReturnValue
+
+from ..interpreterbase import (
+ flatten, permittedKwargs, noKwargs,
+ InvalidArguments, FeatureNew
+)
+
+class CudaModule(ExtensionModule):
+
+ @FeatureNew('CUDA module', '0.50.0')
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ @noKwargs
+ def min_driver_version(self, state, args, kwargs):
+ argerror = InvalidArguments('min_driver_version must have exactly one positional argument: ' +
+ 'an NVCC compiler object, or its version string.')
+
+ if len(args) != 1:
+ raise argerror
+ else:
+ cuda_version = self._version_from_compiler(args[0])
+ if cuda_version == 'unknown':
+ raise argerror
+
+ driver_version_table = [
+ {'cuda_version': '>=10.0.130', 'windows': '411.31', 'linux': '410.48'},
+ {'cuda_version': '>=9.2.148', 'windows': '398.26', 'linux': '396.37'},
+ {'cuda_version': '>=9.2.88', 'windows': '397.44', 'linux': '396.26'},
+ {'cuda_version': '>=9.1.85', 'windows': '391.29', 'linux': '390.46'},
+ {'cuda_version': '>=9.0.76', 'windows': '385.54', 'linux': '384.81'},
+ {'cuda_version': '>=8.0.61', 'windows': '376.51', 'linux': '375.26'},
+ {'cuda_version': '>=8.0.44', 'windows': '369.30', 'linux': '367.48'},
+ {'cuda_version': '>=7.5.16', 'windows': '353.66', 'linux': '352.31'},
+ {'cuda_version': '>=7.0.28', 'windows': '347.62', 'linux': '346.46'},
+ ]
+
+ driver_version = 'unknown'
+ for d in driver_version_table:
+ if version_compare(cuda_version, d['cuda_version']):
+ driver_version = d.get(state.host_machine.system, d['linux'])
+ break
+
+ return ModuleReturnValue(driver_version, [driver_version])
+
+ @permittedKwargs(['detected'])
+ def nvcc_arch_flags(self, state, args, kwargs):
+ nvcc_arch_args = self._validate_nvcc_arch_args(state, args, kwargs)
+ ret = self._nvcc_arch_flags(*nvcc_arch_args)[0]
+ return ModuleReturnValue(ret, [ret])
+
+ @permittedKwargs(['detected'])
+ def nvcc_arch_readable(self, state, args, kwargs):
+ nvcc_arch_args = self._validate_nvcc_arch_args(state, args, kwargs)
+ ret = self._nvcc_arch_flags(*nvcc_arch_args)[1]
+ return ModuleReturnValue(ret, [ret])
+
+ @staticmethod
+ def _break_arch_string(s):
+ s = re.sub('[ \t,;]+', ';', s)
+ s = s.strip(';').split(';')
+ return s
+
+ @staticmethod
+ def _version_from_compiler(c):
+ if isinstance(c, CompilerHolder):
+ c = c.compiler
+ if isinstance(c, CudaCompiler):
+ return c.version
+ if isinstance(c, str):
+ return c
+ return 'unknown'
+
+ def _validate_nvcc_arch_args(self, state, args, kwargs):
+ argerror = InvalidArguments('The first argument must be an NVCC compiler object, or its version string!')
+
+ if len(args) < 1:
+ raise argerror
+ else:
+ cuda_version = self._version_from_compiler(args[0])
+ if cuda_version == 'unknown':
+ raise argerror
+
+ arch_list = [] if len(args) <= 1 else flatten(args[1:])
+ arch_list = [self._break_arch_string(a) for a in arch_list]
+ arch_list = flatten(arch_list)
+ if len(arch_list) > 1 and not set(arch_list).isdisjoint({'All', 'Common', 'Auto'}):
+ raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')
+ arch_list = arch_list[0] if len(arch_list) == 1 else arch_list
+
+ detected = flatten([kwargs.get('detected', [])])
+ detected = [self._break_arch_string(a) for a in detected]
+ detected = flatten(detected)
+ if not set(detected).isdisjoint({'All', 'Common', 'Auto'}):
+ raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')
+
+ return cuda_version, arch_list, detected
+
+ def _nvcc_arch_flags(self, cuda_version, cuda_arch_list='Auto', detected=''):
+ """
+ Using the CUDA Toolkit version (the NVCC version) and the target
+ architectures, compute the NVCC architecture flags.
+ """
+
+ cuda_known_gpu_architectures = ['Fermi', 'Kepler', 'Maxwell'] # noqa: E221
+ cuda_common_gpu_architectures = ['3.0', '3.5', '5.0'] # noqa: E221
+ cuda_limit_gpu_architecture = None # noqa: E221
+ cuda_all_gpu_architectures = ['3.0', '3.2', '3.5', '5.0'] # noqa: E221
+
+ if version_compare(cuda_version, '<7.0'):
+ cuda_limit_gpu_architecture = '5.2'
+
+ if version_compare(cuda_version, '>=7.0'):
+ cuda_known_gpu_architectures += ['Kepler+Tegra', 'Kepler+Tesla', 'Maxwell+Tegra'] # noqa: E221
+ cuda_common_gpu_architectures += ['5.2'] # noqa: E221
+
+ if version_compare(cuda_version, '<8.0'):
+ cuda_common_gpu_architectures += ['5.2+PTX'] # noqa: E221
+ cuda_limit_gpu_architecture = '6.0' # noqa: E221
+
+ if version_compare(cuda_version, '>=8.0'):
+ cuda_known_gpu_architectures += ['Pascal', 'Pascal+Tegra'] # noqa: E221
+ cuda_common_gpu_architectures += ['6.0', '6.1'] # noqa: E221
+ cuda_all_gpu_architectures += ['6.0', '6.1', '6.2'] # noqa: E221
+
+ if version_compare(cuda_version, '<9.0'):
+ cuda_common_gpu_architectures += ['6.1+PTX'] # noqa: E221
+ cuda_limit_gpu_architecture = '7.0' # noqa: E221
+
+ if version_compare(cuda_version, '>=9.0'):
+ cuda_known_gpu_architectures += ['Volta', 'Volta+Tegra'] # noqa: E221
+ cuda_common_gpu_architectures += ['7.0', '7.0+PTX'] # noqa: E221
+ cuda_all_gpu_architectures += ['7.0', '7.0+PTX', '7.2', '7.2+PTX'] # noqa: E221
+
+ if version_compare(cuda_version, '<10.0'):
+ cuda_limit_gpu_architecture = '7.5'
+
+ if version_compare(cuda_version, '>=10.0'):
+ cuda_known_gpu_architectures += ['Turing'] # noqa: E221
+ cuda_common_gpu_architectures += ['7.5', '7.5+PTX'] # noqa: E221
+ cuda_all_gpu_architectures += ['7.5', '7.5+PTX'] # noqa: E221
+
+ if version_compare(cuda_version, '<11.0'):
+ cuda_limit_gpu_architecture = '8.0'
+
+ if not cuda_arch_list:
+ cuda_arch_list = 'Auto'
+
+ if cuda_arch_list == 'All': # noqa: E271
+ cuda_arch_list = cuda_known_gpu_architectures
+ elif cuda_arch_list == 'Common': # noqa: E271
+ cuda_arch_list = cuda_common_gpu_architectures
+ elif cuda_arch_list == 'Auto': # noqa: E271
+ if detected:
+ if isinstance(detected, list):
+ cuda_arch_list = detected
+ else:
+ cuda_arch_list = self._break_arch_string(detected)
+
+ if cuda_limit_gpu_architecture:
+ filtered_cuda_arch_list = []
+ for arch in cuda_arch_list:
+ if arch:
+ if version_compare(arch, '>=' + cuda_limit_gpu_architecture):
+ arch = cuda_common_gpu_architectures[-1]
+ if arch not in filtered_cuda_arch_list:
+ filtered_cuda_arch_list.append(arch)
+ cuda_arch_list = filtered_cuda_arch_list
+ else:
+ cuda_arch_list = cuda_common_gpu_architectures
+ elif isinstance(cuda_arch_list, str):
+ cuda_arch_list = self._break_arch_string(cuda_arch_list)
+
+ cuda_arch_list = sorted([x for x in set(cuda_arch_list) if x])
+
+ cuda_arch_bin = []
+ cuda_arch_ptx = []
+ for arch_name in cuda_arch_list:
+ arch_bin = []
+ arch_ptx = []
+ add_ptx = arch_name.endswith('+PTX')
+ if add_ptx:
+ arch_name = arch_name[:-len('+PTX')]
+
+ if re.fullmatch('[0-9]+\\.[0-9](\\([0-9]+\\.[0-9]\\))?', arch_name):
+ arch_bin, arch_ptx = [arch_name], [arch_name]
+ else:
+ arch_bin, arch_ptx = {
+ 'Fermi': (['2.0', '2.1(2.0)'], []),
+ 'Kepler+Tegra': (['3.2'], []),
+ 'Kepler+Tesla': (['3.7'], []),
+ 'Kepler': (['3.0', '3.5'], ['3.5']),
+ 'Maxwell+Tegra': (['5.3'], []),
+ 'Maxwell': (['5.0', '5.2'], ['5.2']),
+ 'Pascal': (['6.0', '6.1'], ['6.1']),
+ 'Pascal+Tegra': (['6.2'], []),
+ 'Volta': (['7.0'], ['7.0']),
+ 'Volta+Tegra': (['7.2'], []),
+ 'Turing': (['7.5'], ['7.5']),
+ }.get(arch_name, (None, None))
+
+ if arch_bin is None:
+ raise InvalidArguments('Unknown CUDA Architecture Name {}!'
+ .format(arch_name))
+
+ cuda_arch_bin += arch_bin
+
+ if add_ptx:
+ if not arch_ptx:
+ arch_ptx = arch_bin
+ cuda_arch_ptx += arch_ptx
+
+ cuda_arch_bin = re.sub('\\.', '', ' '.join(cuda_arch_bin))
+ cuda_arch_ptx = re.sub('\\.', '', ' '.join(cuda_arch_ptx))
+ cuda_arch_bin = re.findall('[0-9()]+', cuda_arch_bin)
+ cuda_arch_ptx = re.findall('[0-9]+', cuda_arch_ptx)
+ cuda_arch_bin = sorted(list(set(cuda_arch_bin)))
+ cuda_arch_ptx = sorted(list(set(cuda_arch_ptx)))
+
+ nvcc_flags = []
+ nvcc_archs_readable = []
+
+ for arch in cuda_arch_bin:
+ m = re.match('([0-9]+)\\(([0-9]+)\\)', arch)
+ if m:
+ nvcc_flags += ['-gencode', 'arch=compute_' + m[2] + ',code=sm_' + m[1]]
+ nvcc_archs_readable += ['sm_' + m[1]]
+ else:
+ nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=sm_' + arch]
+ nvcc_archs_readable += ['sm_' + arch]
+
+ for arch in cuda_arch_ptx:
+ nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=compute_' + arch]
+ nvcc_archs_readable += ['compute_' + arch]
+
+ return nvcc_flags, nvcc_archs_readable
+
+def initialize(*args, **kwargs):
+ return CudaModule(*args, **kwargs)
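For readers skimming the new module: `_nvcc_arch_flags()` expands architecture names into `-gencode` pairs, one `code=sm_XX` (SASS) entry per real architecture plus an extra `code=compute_XX` (PTX) entry when the name carries a `+PTX` suffix. The standalone sketch below mirrors that mapping in simplified form; it ignores the `X.Y(Z.W)` virtual-architecture syntax and the named-GPU table handled by the module itself:

```python
# Illustrative only: a simplified version of the -gencode expansion performed
# by _nvcc_arch_flags() above.
def gencode_flags(arch_names):
    flags, readable = [], []
    for name in arch_names:
        want_ptx = name.endswith('+PTX')
        num = (name[:-len('+PTX')] if want_ptx else name).replace('.', '')
        flags += ['-gencode', 'arch=compute_{0},code=sm_{0}'.format(num)]
        readable += ['sm_' + num]
        if want_ptx:
            # PTX entries stay JIT-compilable on newer GPUs.
            flags += ['-gencode', 'arch=compute_{0},code=compute_{0}'.format(num)]
            readable += ['compute_' + num]
    return flags, readable

print(gencode_flags(['6.0', '6.1+PTX']))
```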
diff --git a/run_tests.py b/run_tests.py
index 20cb4e2..d72546b 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -80,7 +80,7 @@ def get_fake_env(sdir='', bdir=None, prefix='', opts=None):
if opts is None:
opts = get_fake_options(prefix)
env = Environment(sdir, bdir, opts)
- env.coredata.compiler_options['c_args'] = FakeCompilerOptions()
+ env.coredata.compiler_options.host['c_args'] = FakeCompilerOptions()
env.machines.host.cpu_family = 'x86_64' # Used on macOS inside find_library
return env
diff --git a/run_unittests.py b/run_unittests.py
index bc827bc..a244bbd 100755
--- a/run_unittests.py
+++ b/run_unittests.py
@@ -798,7 +798,7 @@ class InternalTests(unittest.TestCase):
env = get_fake_env()
compiler = env.detect_c_compiler(False)
env.coredata.compilers = {'c': compiler}
- env.coredata.compiler_options['c_link_args'] = FakeCompilerOptions()
+ env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
@@ -2937,10 +2937,10 @@ recommended as it is not supported on some platforms''')
# c_args value should be parsed with shlex
self.init(testdir, extra_args=['-Dc_args=foo bar "one two"'])
obj = mesonbuild.coredata.load(self.builddir)
- self.assertEqual(obj.compiler_options['c_args'].value, ['foo', 'bar', 'one two'])
+ self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo', 'bar', 'one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
- self.assertEqual(obj.compiler_options['c_args'].value, ['foo bar', 'one', 'two'])
+ self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
# Setting a 2nd time the same option should override the first value
@@ -2953,7 +2953,7 @@ recommended as it is not supported on some platforms''')
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
- self.assertEqual(obj.compiler_options['c_args'].value, ['bar'])
+ self.assertEqual(obj.compiler_options.host['c_args'].value, ['bar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
@@ -2962,7 +2962,7 @@ recommended as it is not supported on some platforms''')
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
- self.assertEqual(obj.compiler_options['c_args'].value, ['foo'])
+ self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo'])
self.wipe()
except KeyError:
# Ignore KeyError, it happens on CI for compilers that do not

@@ -3351,7 +3351,7 @@ recommended as it is not supported on some platforms''')
# Check buildsystem_files
bs_files = ['meson.build', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
- self.assertPathListEqual(res['buildsystem_files'], bs_files)
+ self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
diff --git a/test cases/cuda/3 cudamodule/meson.build b/test cases/cuda/3 cudamodule/meson.build
new file mode 100644
index 0000000..0dc9489
--- /dev/null
+++ b/test cases/cuda/3 cudamodule/meson.build
@@ -0,0 +1,16 @@
+project('cudamodule', 'cuda', version : '1.0.0')
+
+nvcc = meson.get_compiler('cuda')
+cuda = import('unstable-cuda')
+
+arch_flags = cuda.nvcc_arch_flags(nvcc, 'Auto', detected: ['3.0'])
+arch_readable = cuda.nvcc_arch_readable(nvcc, 'Auto', detected: ['3.0'])
+driver_version = cuda.min_driver_version(nvcc)
+
+message('NVCC version: ' + nvcc.version())
+message('NVCC flags: ' + ' '.join(arch_flags))
+message('NVCC readable: ' + ' '.join(arch_readable))
+message('Driver version: >=' + driver_version)
+
+exe = executable('prog', 'prog.cu', cuda_args: arch_flags)
+test('cudatest', exe)
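The test above also calls `cuda.min_driver_version(nvcc)`, which conceptually walks the version table in `unstable_cuda.py` from newest to oldest and returns the driver requirement of the first row the toolkit version satisfies. An abbreviated restatement, where plain tuple comparison stands in for Meson's `version_compare` and only a few table rows are kept:

```python
# Abbreviated illustration of the min_driver_version lookup; not the module itself.
def min_driver_version(cuda_version, system='linux'):
    table = [
        ('10.0.130', {'windows': '411.31', 'linux': '410.48'}),
        ('9.2.148',  {'windows': '398.26', 'linux': '396.37'}),
        ('8.0.61',   {'windows': '376.51', 'linux': '375.26'}),
    ]
    for minimum, drivers in table:
        # Newest rows first: the first minimum we meet or exceed wins.
        if tuple(map(int, cuda_version.split('.'))) >= tuple(map(int, minimum.split('.'))):
            return drivers.get(system, drivers['linux'])
    return 'unknown'

print(min_driver_version('10.0.130'))  # 410.48 on linux
```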
diff --git a/test cases/cuda/3 cudamodule/prog.cu b/test cases/cuda/3 cudamodule/prog.cu
new file mode 100644
index 0000000..7eab673
--- /dev/null
+++ b/test cases/cuda/3 cudamodule/prog.cu
@@ -0,0 +1,30 @@
+#include <iostream>
+
+int main(int argc, char **argv) {
+ int cuda_devices = 0;
+ std::cout << "CUDA version: " << CUDART_VERSION << "\n";
+ cudaGetDeviceCount(&cuda_devices);
+ if(cuda_devices == 0) {
+ std::cout << "No Cuda hardware found. Exiting.\n";
+ return 0;
+ }
+ std::cout << "This computer has " << cuda_devices << " Cuda device(s).\n";
+ cudaDeviceProp props;
+ cudaGetDeviceProperties(&props, 0);
+ std::cout << "Properties of device 0.\n\n";
+
+ std::cout << " Name: " << props.name << "\n";
+ std::cout << " Global memory: " << props.totalGlobalMem << "\n";
+ std::cout << " Shared memory: " << props.sharedMemPerBlock << "\n";
+ std::cout << " Constant memory: " << props.totalConstMem << "\n";
+ std::cout << " Block registers: " << props.regsPerBlock << "\n";
+
+ std::cout << " Warp size: " << props.warpSize << "\n";
+ std::cout << " Threads per block: " << props.maxThreadsPerBlock << "\n";
+ std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << "\n";
+ std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << "\n";
+ std::cout << "\n";
+
+ return 0;
+}
+
diff --git a/test cases/fortran/12 submodule/a1.f90 b/test cases/fortran/12 submodule/a1.f90
new file mode 100644
index 0000000..cb44916
--- /dev/null
+++ b/test cases/fortran/12 submodule/a1.f90
@@ -0,0 +1,25 @@
+module a1
+implicit none
+
+interface
+module elemental real function pi2tau(pi)
+ real, intent(in) :: pi
+end function pi2tau
+
+module real function get_pi()
+end function get_pi
+end interface
+
+end module a1
+
+program hierN
+
+use a1
+
+pi = get_pi()
+
+tau = pi2tau(pi)
+
+print *,'pi=',pi,'tau=',tau
+
+end program
diff --git a/test cases/fortran/12 submodule/a2.f90 b/test cases/fortran/12 submodule/a2.f90
new file mode 100644
index 0000000..b3ce1f0
--- /dev/null
+++ b/test cases/fortran/12 submodule/a2.f90
@@ -0,0 +1,10 @@
+submodule (a1) a2
+
+contains
+
+module procedure pi2tau
+ pi2tau = 2*pi
+end procedure pi2tau
+
+
+end submodule a2
diff --git a/test cases/fortran/12 submodule/a3.f90 b/test cases/fortran/12 submodule/a3.f90
new file mode 100644
index 0000000..d6929b0
--- /dev/null
+++ b/test cases/fortran/12 submodule/a3.f90
@@ -0,0 +1,10 @@
+submodule (a1:a2) a3
+
+contains
+
+module procedure get_pi
+ get_pi = 4.*atan(1.)
+end procedure get_pi
+
+
+end submodule a3
diff --git a/test cases/fortran/12 submodule/child.f90 b/test cases/fortran/12 submodule/child.f90
new file mode 100644
index 0000000..aa5bb5e
--- /dev/null
+++ b/test cases/fortran/12 submodule/child.f90
@@ -0,0 +1,10 @@
+submodule (mother) daughter
+
+contains
+
+module procedure pi2tau
+ pi2tau = 2*pi
+end procedure pi2tau
+
+end submodule daughter
+
diff --git a/test cases/fortran/12 submodule/meson.build b/test cases/fortran/12 submodule/meson.build
new file mode 100644
index 0000000..cd62a30
--- /dev/null
+++ b/test cases/fortran/12 submodule/meson.build
@@ -0,0 +1,7 @@
+project('submodule single level', 'fortran')
+
+hier2 = executable('single', 'parent.f90','child.f90')
+test('single-level hierarchy', hier2)
+
+hierN = executable('multi', 'a1.f90', 'a2.f90', 'a3.f90')
+test('multi-level hierarchy', hierN)
diff --git a/test cases/fortran/12 submodule/parent.f90 b/test cases/fortran/12 submodule/parent.f90
new file mode 100644
index 0000000..05fe431
--- /dev/null
+++ b/test cases/fortran/12 submodule/parent.f90
@@ -0,0 +1,23 @@
+module mother
+real, parameter :: pi = 4.*atan(1.)
+real :: tau
+
+interface
+module elemental real function pi2tau(pi)
+ real, intent(in) :: pi
+end function pi2tau
+end interface
+
+contains
+
+end module mother
+
+
+program hier1
+use mother
+
+tau = pi2tau(pi)
+
+print *,'pi=',pi, 'tau=', tau
+
+end program