Skip to content

Commit

Permalink
Move Clang-Tidy and sanitizers to Clang 9 in Ubuntu 20.04 (#3684)
Browse files Browse the repository at this point in the history
Fixes #3627, fixes #3650, closes #3636

Description of changes:
- run Clang-Tidy, ASAN, UBSAN in the Ubuntu 20.04 image
- re-enable Clang-Tidy on CUDA code
- fix Clang-Tidy 9 warnings
- replace `boost::optional` by custom code in CUDA sources
- remove unused code
  • Loading branch information
kodiakhq[bot] committed Apr 25, 2020
2 parents 7f48b4c + f4195c5 commit ed01bed
Show file tree
Hide file tree
Showing 80 changed files with 1,347 additions and 1,289 deletions.
4 changes: 2 additions & 2 deletions .clang-tidy
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@ Checks: |
-clang-analyzer-optin.mpi.MPI-Checker,
-clang-analyzer-security.FloatLoopCounter,
bugprone-*,
-bugprone-macro-parentheses,
clang-analyzer-alpha.*,
modernize-avoid-c-arrays,
modernize-deprecated-headers,
modernize-make-shared,
modernize-make-unique,
Expand Down Expand Up @@ -36,7 +36,7 @@ Checks: |
readability-string-compare,
readability-uniqueptr-delete-release,
readability-function-size'
WarningsAsErrors: '*'
WarningsAsErrors: '*,-clang-analyzer-core.StackAddrEscapeBase,-clang-analyzer-optin.mpi.MPI-Checker'
HeaderFilterRegex: '.*'
AnalyzeTemporaryDtors: false
FormatStyle: none
Expand Down
46 changes: 27 additions & 19 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ status_pending:
style:
<<: *global_job_definition
stage: prepare
image: docker.pkg.github.com/espressomd/docker/clang:a1192b35590a1a474c55fe1e9a1e6c25758454ea
image: docker.pkg.github.com/espressomd/docker/ubuntu-20.04:b0de8c7df57d39edf72ad7d40c28bb67467dd7ec
dependencies: []
before_script:
- git submodule deinit .
Expand Down Expand Up @@ -102,6 +102,25 @@ no_rotation:
- docker
- linux

ubuntu-20.04-sanitizer:
<<: *global_job_definition
image: docker.pkg.github.com/espressomd/docker/ubuntu-20.04:b0de8c7df57d39edf72ad7d40c28bb67467dd7ec
stage: build
variables:
CC: 'clang-9'
CXX: 'clang++-9'
script:
- export myconfig=maxset with_cuda=true with_cuda_compiler=clang with_coverage=false
- export with_static_analysis=true test_timeout=900
- export with_asan=true ASAN_OPTIONS="allocator_may_return_null=1"
- export with_ubsan=true UBSAN_OPTIONS=suppressions=${CI_PROJECT_DIR}/maintainer/CI/ubsan.supp
- bash maintainer/CI/build_cmake.sh
timeout: 2h
tags:
- docker
- linux
- cuda

### Builds with different Distributions

debian:10:
Expand Down Expand Up @@ -285,9 +304,12 @@ installation:
empty:
<<: *global_job_definition
stage: build
image: docker.pkg.github.com/espressomd/docker/clang:a1192b35590a1a474c55fe1e9a1e6c25758454ea
image: docker.pkg.github.com/espressomd/docker/ubuntu-20.04:b0de8c7df57d39edf72ad7d40c28bb67467dd7ec
variables:
CC: 'clang-9'
CXX: 'clang++-9'
script:
- export myconfig=empty with_cuda=true cmake_params="-DWITH_CUDA_COMPILER=clang" with_static_analysis=true
- export myconfig=empty with_cuda=true with_cuda_compiler=clang with_static_analysis=true with_scafacos=false
- bash maintainer/CI/build_cmake.sh
tags:
- docker
Expand All @@ -312,7 +334,7 @@ rocm-maxset:
stage: build
image: docker.pkg.github.com/espressomd/docker/rocm:446ff604bbfa63f30ddb462697fa0d0dc2630460
script:
- export myconfig=maxset with_cuda=true cmake_params="-DWITH_CUDA_COMPILER=hip"
- export myconfig=maxset with_cuda=true with_cuda_compiler=hip
- bash maintainer/CI/build_cmake.sh
tags:
- amdgpu
Expand All @@ -322,7 +344,7 @@ rocm:latest:
stage: build
image: docker.pkg.github.com/espressomd/docker/rocm:latest_base
script:
- export myconfig=maxset with_cuda=true cmake_params="-DWITH_CUDA_COMPILER=hip"
- export myconfig=maxset with_cuda=true with_cuda_compiler=hip
- bash maintainer/CI/build_cmake.sh
tags:
- amdgpu
Expand All @@ -341,20 +363,6 @@ osx:

### Builds with different compilers

clang:6.0:
<<: *global_job_definition
stage: build
image: docker.pkg.github.com/espressomd/docker/clang:446ff604bbfa63f30ddb462697fa0d0dc2630460
script:
- export myconfig=maxset with_cuda=true cmake_params="-DWITH_CUDA_COMPILER=clang" with_coverage=false with_static_analysis=true with_asan=true with_ubsan=true test_timeout=900
- bash maintainer/CI/build_cmake.sh
timeout: 2h
tags:
- docker
- linux
- cuda
- ptrace

intel:19:
<<: *global_job_definition
stage: build
Expand Down
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -414,7 +414,7 @@ target_link_libraries(cxx_interface INTERFACE coverage_interface)

if(WITH_CLANG_TIDY)
find_package(ClangTidy "${CMAKE_CXX_COMPILER_VERSION}" EXACT REQUIRED)
set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_EXE}")
set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_EXE};--extra-arg=--cuda-host-only")
endif()

if(WITH_TESTS)
Expand Down
5 changes: 3 additions & 2 deletions maintainer/CI/build_cmake.sh
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ set_default_value make_check_tutorials false
set_default_value make_check_samples false
set_default_value make_check_benchmarks false
set_default_value with_cuda false
set_default_value with_cuda_compiler "nvcc"
set_default_value build_type "RelWithAssert"
set_default_value with_ccache false
set_default_value with_scafacos true
Expand Down Expand Up @@ -144,7 +145,7 @@ if [ "${with_static_analysis}" = true ]; then
fi

if [ "${with_cuda}" = true ]; then
cmake_params="-DWITH_CUDA=ON ${cmake_params}"
cmake_params="-DWITH_CUDA=ON -DWITH_CUDA_COMPILER=${with_cuda_compiler} ${cmake_params}"
else
cmake_params="-DWITH_CUDA=OFF ${cmake_params}"
fi
Expand All @@ -165,7 +166,7 @@ outp srcdir builddir \
check_odd_only \
with_static_analysis myconfig \
build_procs check_procs \
with_cuda with_ccache
with_cuda with_cuda_compiler with_ccache

echo "Creating ${builddir}..."
mkdir -p "${builddir}"
Expand Down
7 changes: 6 additions & 1 deletion maintainer/CI/fix_style.sh
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ if ! git diff-index --quiet HEAD -- && [ "${1}" != "-f" ]; then
fi

maintainer/lint/pre_commit.sh run --all-files
pre_commit_return_code="${?}"

if [ "${CI}" != "" ]; then
git --no-pager diff > style.patch
Expand All @@ -40,7 +41,11 @@ if [ "${?}" = 1 ]; then
fi
exit 1
else
echo "Passed style check"
if [ "${pre_commit_return_code}" = 0 ]; then
echo "Passed style check"
else
echo "Failed style check" >&2
fi
fi

maintainer/lint/pylint.sh --score=no --reports=no --output-format=text src doc maintainer testsuite samples | tee pylint.log
Expand Down
10 changes: 10 additions & 0 deletions maintainer/CI/ubsan.supp
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Stuff occurring in boost directly
null:boost/serialization/singleton.hpp
null:boost/mpi/detail/binary_buffer_iprimitive.hpp
null:boost/mpi/communicator.hpp

# Caused by boost MPI
null:stl_iterator.h

# function calls with invalid pointers in Cython-generated code
function:python/espressomd/*.cpp
6 changes: 3 additions & 3 deletions maintainer/format/clang-format.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.



CLANG_FORMAT_VER=6.0
CLANG_FORMAT_VER=9.0
if hash clang-format-${CLANG_FORMAT_VER} 2>/dev/null; then
CLANGFORMAT="$(which clang-format-${CLANG_FORMAT_VER})"
elif hash clang-format-${CLANG_FORMAT_VER%.*} 2>/dev/null; then
CLANGFORMAT="$(which clang-format-${CLANG_FORMAT_VER%.*})"
elif hash clang-format 2>/dev/null; then
CLANGFORMAT="$(which clang-format)"
else
Expand Down
2 changes: 2 additions & 0 deletions src/core/BondList.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,7 @@ class BondList {

return *this;
}
// NOLINTNEXTLINE(bugprone-exception-escape)
BondList &operator=(BondList &&rhs) noexcept {
if (this != std::addressof(rhs)) {
std::swap(m_storage, rhs.m_storage);
Expand Down Expand Up @@ -224,6 +225,7 @@ class BondList {
*/
bool empty() const { return m_storage.empty(); }

// NOLINTNEXTLINE(bugprone-exception-escape)
friend void swap(BondList &lhs, BondList &rhs) {
using std::swap;

Expand Down
47 changes: 24 additions & 23 deletions src/core/EspressoSystemInterface_cuda.cu
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
// Position and charge
__global__ void split_kernel_rq(CUDA_particle_data *particles, float *r,
float *q, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
auto idx = static_cast<int>(blockDim.x * blockIdx.x + threadIdx.x);
if (idx >= n)
return;

Expand All @@ -50,7 +50,7 @@ __global__ void split_kernel_rq(CUDA_particle_data *particles, float *r,

// Charge only
__global__ void split_kernel_q(CUDA_particle_data *particles, float *q, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
auto idx = static_cast<int>(blockDim.x * blockIdx.x + threadIdx.x);
if (idx >= n)
return;

Expand All @@ -63,7 +63,7 @@ __global__ void split_kernel_q(CUDA_particle_data *particles, float *q, int n) {

// Position only
__global__ void split_kernel_r(CUDA_particle_data *particles, float *r, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
auto idx = static_cast<int>(blockDim.x * blockIdx.x + threadIdx.x);
if (idx >= n)
return;

Expand All @@ -79,7 +79,7 @@ __global__ void split_kernel_r(CUDA_particle_data *particles, float *r, int n) {
#ifdef CUDA
// Velocity
__global__ void split_kernel_v(CUDA_particle_data *particles, float *v, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
auto idx = static_cast<int>(blockDim.x * blockIdx.x + threadIdx.x);
if (idx >= n)
return;

Expand All @@ -97,7 +97,7 @@ __global__ void split_kernel_v(CUDA_particle_data *particles, float *v, int n) {
// Dipole moment
__global__ void split_kernel_dip(CUDA_particle_data *particles, float *dip,
int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
auto idx = static_cast<int>(blockDim.x * blockIdx.x + threadIdx.x);
if (idx >= n)
return;

Expand All @@ -114,7 +114,7 @@ __global__ void split_kernel_dip(CUDA_particle_data *particles, float *dip,
__global__ void split_kernel_director(CUDA_particle_data *particles,
float *director, int n) {
#ifdef ROTATION
int idx = blockDim.x * blockIdx.x + threadIdx.x;
auto idx = static_cast<int>(blockDim.x * blockIdx.x + threadIdx.x);
if (idx >= n)
return;

Expand All @@ -129,37 +129,37 @@ __global__ void split_kernel_director(CUDA_particle_data *particles,
}

void EspressoSystemInterface::reallocDeviceMemory(int n) {
if (m_needsRGpu && ((n != m_gpu_npart) || (m_r_gpu_begin == 0))) {
if (m_r_gpu_begin != 0)
if (m_needsRGpu && ((n != m_gpu_npart) || (m_r_gpu_begin == nullptr))) {
if (m_r_gpu_begin != nullptr)
cuda_safe_mem(cudaFree(m_r_gpu_begin));
cuda_safe_mem(cudaMalloc(&m_r_gpu_begin, 3 * n * sizeof(float)));
m_r_gpu_end = m_r_gpu_begin + 3 * n;
}
#ifdef DIPOLES
if (m_needsDipGpu && ((n != m_gpu_npart) || (m_dip_gpu_begin == 0))) {
if (m_dip_gpu_begin != 0)
if (m_needsDipGpu && ((n != m_gpu_npart) || (m_dip_gpu_begin == nullptr))) {
if (m_dip_gpu_begin != nullptr)
cuda_safe_mem(cudaFree(m_dip_gpu_begin));
cuda_safe_mem(cudaMalloc(&m_dip_gpu_begin, 3 * n * sizeof(float)));
m_dip_gpu_end = m_dip_gpu_begin + 3 * n;
}
#endif
if (m_needsVGpu && ((n != m_gpu_npart) || (m_v_gpu_begin == 0))) {
if (m_v_gpu_begin != 0)
if (m_needsVGpu && ((n != m_gpu_npart) || (m_v_gpu_begin == nullptr))) {
if (m_v_gpu_begin != nullptr)
cuda_safe_mem(cudaFree(m_v_gpu_begin));
cuda_safe_mem(cudaMalloc(&m_v_gpu_begin, 3 * n * sizeof(float)));
m_v_gpu_end = m_v_gpu_begin + 3 * n;
}

if (m_needsQGpu && ((n != m_gpu_npart) || (m_q_gpu_begin == 0))) {
if (m_q_gpu_begin != 0)
if (m_needsQGpu && ((n != m_gpu_npart) || (m_q_gpu_begin == nullptr))) {
if (m_q_gpu_begin != nullptr)
cuda_safe_mem(cudaFree(m_q_gpu_begin));
cuda_safe_mem(cudaMalloc(&m_q_gpu_begin, 3 * n * sizeof(float)));
m_q_gpu_end = m_q_gpu_begin + 3 * n;
}

if (m_needsDirectorGpu &&
((n != m_gpu_npart) || (m_director_gpu_begin == 0))) {
if (m_director_gpu_begin != 0)
((n != m_gpu_npart) || (m_director_gpu_begin == nullptr))) {
if (m_director_gpu_begin != nullptr)
cuda_safe_mem(cudaFree(m_director_gpu_begin));
cuda_safe_mem(cudaMalloc(&m_director_gpu_begin, 3 * n * sizeof(float)));
m_director_gpu_end = m_director_gpu_begin + 3 * n;
Expand All @@ -178,28 +178,29 @@ void EspressoSystemInterface::split_particle_struct() {
dim3 block(512, 1, 1);

if (m_needsQGpu && m_needsRGpu)
hipLaunchKernelGGL(split_kernel_rq, dim3(grid), dim3(block), 0, 0,
hipLaunchKernelGGL(split_kernel_rq, dim3(grid), dim3(block), 0, nullptr,
device_particles.data(), m_r_gpu_begin, m_q_gpu_begin,
n);
if (m_needsQGpu && !m_needsRGpu)
hipLaunchKernelGGL(split_kernel_q, dim3(grid), dim3(block), 0, 0,
hipLaunchKernelGGL(split_kernel_q, dim3(grid), dim3(block), 0, nullptr,
device_particles.data(), m_q_gpu_begin, n);
if (!m_needsQGpu && m_needsRGpu)
hipLaunchKernelGGL(split_kernel_r, dim3(grid), dim3(block), 0, 0,
hipLaunchKernelGGL(split_kernel_r, dim3(grid), dim3(block), 0, nullptr,
device_particles.data(), m_r_gpu_begin, n);
#ifdef CUDA
if (m_needsVGpu)
hipLaunchKernelGGL(split_kernel_v, dim3(grid), dim3(block), 0, 0,
hipLaunchKernelGGL(split_kernel_v, dim3(grid), dim3(block), 0, nullptr,
device_particles.data(), m_v_gpu_begin, n);
#endif
#ifdef DIPOLES
if (m_needsDipGpu)
hipLaunchKernelGGL(split_kernel_dip, dim3(grid), dim3(block), 0, 0,
hipLaunchKernelGGL(split_kernel_dip, dim3(grid), dim3(block), 0, nullptr,
device_particles.data(), m_dip_gpu_begin, n);

#endif

if (m_needsDirectorGpu)
hipLaunchKernelGGL(split_kernel_director, dim3(grid), dim3(block), 0, 0,
device_particles.data(), m_director_gpu_begin, n);
hipLaunchKernelGGL(split_kernel_director, dim3(grid), dim3(block), 0,
nullptr, device_particles.data(), m_director_gpu_begin,
n);
}
2 changes: 1 addition & 1 deletion src/core/Particle.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -339,7 +339,7 @@ struct ParticleLocal {
};

/** Struct holding all information for one particle. */
struct Particle {
struct Particle { // NOLINT(bugprone-exception-escape)
int &identity() { return p.identity; }
int const &identity() const { return p.identity; }

Expand Down
Loading

0 comments on commit ed01bed

Please sign in to comment.