From 8ec13fb4c18ea20cd6b73afa91fa030ecc9950f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-No=C3=ABl=20Grad?= Date: Thu, 22 Aug 2024 20:32:23 +0200 Subject: [PATCH 1/7] Add missing feature guards --- testsuite/python/hybrid_decomposition.py | 5 +++-- testsuite/python/lb.py | 1 + testsuite/python/lb_thermostat.py | 5 +++-- testsuite/python/lees_edwards.py | 23 +++++++---------------- testsuite/python/test_checkpoint.py | 2 +- 5 files changed, 15 insertions(+), 21 deletions(-) diff --git a/testsuite/python/hybrid_decomposition.py b/testsuite/python/hybrid_decomposition.py index fa6828ebd9..8dc8e720ec 100644 --- a/testsuite/python/hybrid_decomposition.py +++ b/testsuite/python/hybrid_decomposition.py @@ -97,6 +97,7 @@ def prepare_hybrid_setup(self, n_part_small=0, n_part_large=0): initial velocities; cutoff of small particles is 2.5. """ + assert espressomd.has_features(["LENNARD_JONES"]) box_l = self.system.box_l[0] if n_part_small > 0: self.system.part.add( @@ -249,7 +250,7 @@ def add_particles(self, charge=False, dip=False): @ut.skipIf(system.cell_system.get_state()["n_nodes"] != 1, "Skipping test: only runs for n_nodes == 1") - @utx.skipIfMissingFeatures(["P3M"]) + @utx.skipIfMissingFeatures(["P3M", "LENNARD_JONES"]) def test_against_regular_p3m(self): import espressomd.electrostatics @@ -264,7 +265,7 @@ def test_against_regular_p3m(self): @ut.skipIf(system.cell_system.get_state()["n_nodes"] != 1, "Skipping test: only runs for n_nodes == 1") - @utx.skipIfMissingFeatures(["DP3M"]) + @utx.skipIfMissingFeatures(["DP3M", "LENNARD_JONES"]) def test_against_regular_dp3m(self): import espressomd.magnetostatics diff --git a/testsuite/python/lb.py b/testsuite/python/lb.py index 86daa7fe51..17594a8e4f 100644 --- a/testsuite/python/lb.py +++ b/testsuite/python/lb.py @@ -663,6 +663,7 @@ def test_viscous_coupling_rounding(self): self.system.integrator.run(1) self.assertTrue(np.all(p.f != 0.0)) + @utx.skipIfMissingFeatures("VIRTUAL_SITES_INERTIALESS_TRACERS") def test_tracers_coupling_rounding(self): import espressomd.propagation lbf = self.lb_class(**self.params, **self.lb_params) diff --git a/testsuite/python/lb_thermostat.py b/testsuite/python/lb_thermostat.py index c67709dd98..77677bb96e 100644 --- a/testsuite/python/lb_thermostat.py +++ b/testsuite/python/lb_thermostat.py @@ -111,7 +111,7 @@ def check_partcl_temp(self, partcl_vel): partcl_vel.reshape((-1, 3)), vel_range, n_bins, 0.02, KT, mass=self.partcl_mass) - @utx.skipIfMissingFeatures(["MASS"]) + @utx.skipIfMissingFeatures(["MASS", "THERMOSTAT_PER_PARTICLE"]) def test_temperature_with_particles(self): n_partcls_per_type = 50 partcls_global_gamma = self.system.part.add( @@ -140,7 +140,8 @@ def test_temperature_with_particles(self): np.testing.assert_allclose(fluid_kT, KT, rtol=0.03) - @utx.skipIfMissingFeatures(["EXTERNAL_FORCES"]) + @utx.skipIfMissingFeatures(["EXTERNAL_FORCES", "THERMOSTAT_PER_PARTICLE", + "MASS"]) def test_friction(self): """apply force and measure if the average velocity matches expectation""" diff --git a/testsuite/python/lees_edwards.py b/testsuite/python/lees_edwards.py index 4ebdb2dafb..5c6fc10400 100644 --- a/testsuite/python/lees_edwards.py +++ b/testsuite/python/lees_edwards.py @@ -75,6 +75,7 @@ def setUp(self): def tearDown(self): system = self.system system.part.clear() + system.non_bonded_inter.reset() system.bonded_inter.clear() system.lees_edwards.protocol = None if espressomd.has_features("COLLISION_DETECTION"): @@ -745,44 +746,33 @@ def test_le_breaking_bonds(self): bond_list += p.bonds 
np.testing.assert_array_equal(len(bond_list), 0) - const_offset_params = { - 'shear_velocity': 0.0, - 'shear_direction': 0, - 'shear_plane_normal': 1, - 'initial_pos_offset': 17.2} - - shear_params = { - 'shear_velocity': 0.1, - 'shear_direction': 0, - 'shear_plane_normal': 2, - 'initial_pos_offset': -np.sqrt(0.1)} - - @utx.skipIfMissingFeatures("LENNARD_JONES") def run_lj_pair_visibility(self, shear_direction, shear_plane_normal): """ Simulate LJ particles coming into contact under linear shear and verify forces. This is to make sure that no pairs get lost or are outdated in the short range loop. """ + assert espressomd.has_features(["LENNARD_JONES"]) shear_axis, normal_axis = axis( shear_direction), axis(shear_plane_normal) system = self.system system.part.clear() system.time = 0 system.time_step = 0.1 + cutoff = 1.5 protocol = espressomd.lees_edwards.LinearShear( shear_velocity=3, initial_pos_offset=5) system.lees_edwards.set_boundary_conditions( shear_direction=shear_direction, shear_plane_normal=shear_plane_normal, protocol=protocol) system.cell_system.skin = 0.2 system.non_bonded_inter[0, 0].lennard_jones.set_params( - epsilon=1E-6, sigma=1, cutoff=1.2, shift="auto") + epsilon=1E-6, sigma=1, cutoff=cutoff, shift="auto") system.part.add( pos=(0.1 * normal_axis, -0.8 * normal_axis), v=(1.0 * shear_axis, -0.3 * shear_axis)) assert np.all(system.part.all().f == 0.) tests_common.check_non_bonded_loop_trace( - self, system, cutoff=system.non_bonded_inter[0, 0].lennard_jones.get_params()["cutoff"] + system.cell_system.skin) + self, system, cutoff=cutoff + system.cell_system.skin) # Rewind the clock to get back the LE offset applied during force calc system.time = system.time - system.time_step @@ -793,11 +783,12 @@ def run_lj_pair_visibility(self, shear_direction, shear_plane_normal): if np.any(np.abs(system.part.all().f) > 0): have_interacted = True tests_common.check_non_bonded_loop_trace( - self, system, cutoff=system.non_bonded_inter[0, 0].lennard_jones.get_params()["cutoff"] + system.cell_system.skin) + self, system, cutoff=cutoff + system.cell_system.skin) system.time = system.time - system.time_step tests_common.verify_lj_forces(system, 1E-7) assert have_interacted + @utx.skipIfMissingFeatures(["LENNARD_JONES"]) def test_zz_lj_pair_visibility(self): # check that regular decomposition without fully connected doesn't # catch the particle diff --git a/testsuite/python/test_checkpoint.py b/testsuite/python/test_checkpoint.py index 6d096aca7f..45c14b47b4 100644 --- a/testsuite/python/test_checkpoint.py +++ b/testsuite/python/test_checkpoint.py @@ -55,7 +55,7 @@ and ('LB.CPU' in modes or 'LB.GPU' in modes and is_gpu_available)) has_p3m_mode = 'P3M.CPU' in modes or 'P3M.GPU' in modes and is_gpu_available has_thermalized_bonds = 'THERM.LB' in modes or 'THERM.LANGEVIN' in modes -has_drude = (espressomd.has_features(['ELECTROSTATICS' and 'MASS', 'ROTATION']) +has_drude = (espressomd.has_features(['ELECTROSTATICS', 'MASS', 'ROTATION']) and has_thermalized_bonds) has_ase = 'ASE' in modes From e0d501584abf2672f15fe6a15941dfa2d0b0acae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-No=C3=ABl=20Grad?= Date: Thu, 22 Aug 2024 20:44:02 +0200 Subject: [PATCH 2/7] Fix Doxygen pipeline Use standard CMake functionality instead of GNU sed extensions. Fix Doxygen warnings about undocumented parameters. 
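For reference, a quick Python sketch of the DOI rewrite performed by the new
doc/doxygen/bibtex_preprocess.cmake script (illustrative only, not part of the
build; the entry key and DOI below are made up):

    import re

    entry = '@article{smith21a,\n  doi = {10.1000/xyz123},\n}'
    # turn "doi = {10.xxxx/...}" into "url={https://doi.org/10.xxxx/...}"
    print(re.sub(r'(^[ \t]*)doi *= *([{"]+)(10\.[0-9]+)',
                 r'\1url=\2https://doi.org/\3', entry, flags=re.M))
    # @article{smith21a,
    #   url={https://doi.org/10.1000/xyz123},
    # }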
--- doc/doxygen/CMakeLists.txt | 14 ++++++------- doc/doxygen/bibtex_postprocess.cmake | 26 +++++++++++++++++++++++ doc/doxygen/bibtex_preprocess.cmake | 29 ++++++++++++++++++++++++++ src/core/cell_system/CellStructure.hpp | 1 + 4 files changed, 62 insertions(+), 8 deletions(-) create mode 100644 doc/doxygen/bibtex_postprocess.cmake create mode 100644 doc/doxygen/bibtex_preprocess.cmake diff --git a/doc/doxygen/CMakeLists.txt b/doc/doxygen/CMakeLists.txt index 329ca02610..062902b6a4 100644 --- a/doc/doxygen/CMakeLists.txt +++ b/doc/doxygen/CMakeLists.txt @@ -1,5 +1,5 @@ # -# Copyright (C) 2015-2022 The ESPResSo project +# Copyright (C) 2015-2024 The ESPResSo project # # This file is part of ESPResSo. # @@ -28,25 +28,23 @@ if(DOXYGEN_FOUND) set(DOXYGEN_BIB_IN ${CMAKE_SOURCE_DIR}/doc/bibliography.bib) set(DOXYGEN_BIB_OUT ${CMAKE_CURRENT_BINARY_DIR}/bibliography.bib) - # transform BibTeX DOI fields into URL fields (bibliographic styles available - # to Doxygen do not process the DOI field) add_custom_command( OUTPUT ${DOXYGEN_BIB_OUT} COMMAND - sed -r "'s_^ *doi *= *([^0-9]+)(10\\.[0-9]+)_url=\\1https://doi.org/\\2_'" - ${DOXYGEN_BIB_IN} > ${DOXYGEN_BIB_OUT} + ${CMAKE_COMMAND} -DINPUT=${DOXYGEN_BIB_IN} -DOUTPUT=${DOXYGEN_BIB_OUT} -P + ${CMAKE_CURRENT_SOURCE_DIR}/bibtex_preprocess.cmake DEPENDS ${DOXYGEN_BIB_IN}) set(DOXYFILE_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in) set(DOXYFILE_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) - set(CITELIST ${CMAKE_CURRENT_BINARY_DIR}/html/citelist.html) + set(DOXYGEN_CITELIST ${CMAKE_CURRENT_BINARY_DIR}/html/citelist.html) configure_file(${DOXYFILE_IN} ${DOXYFILE_OUT} @ONLY) add_custom_target( doxygen COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYFILE_OUT} - COMMAND sed -ri "s/ textsuperscript (\\S+) /\\1<\\/sup> /" ${CITELIST} - COMMAND sed -ri "s/ textsubscript (\\S+) /\\1<\\/sub> /" ${CITELIST} + COMMAND ${CMAKE_COMMAND} -DBIBLIOGRAPHY=${DOXYGEN_CITELIST} -P + ${CMAKE_CURRENT_SOURCE_DIR}/bibtex_postprocess.cmake WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "Generating API documentation with Doxygen" DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/doxy-features ${DOXYGEN_BIB_OUT} diff --git a/doc/doxygen/bibtex_postprocess.cmake b/doc/doxygen/bibtex_postprocess.cmake new file mode 100644 index 0000000000..17bd5166ab --- /dev/null +++ b/doc/doxygen/bibtex_postprocess.cmake @@ -0,0 +1,26 @@ +# +# Copyright (C) 2023-2024 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +if(EXISTS "${BIBLIOGRAPHY}") + file(READ "${BIBLIOGRAPHY}" FILE_CONTENTS) + # convert unsupported LaTeX formatting commands to HTML + string(REGEX REPLACE " textsuperscript ([^ ]+) " "\\1 " FILE_CONTENTS "${FILE_CONTENTS}") + string(REGEX REPLACE " textsubscript ([^ ]+) " "\\1 " FILE_CONTENTS "${FILE_CONTENTS}") + file(WRITE "${BIBLIOGRAPHY}" "${FILE_CONTENTS}") +endif() diff --git a/doc/doxygen/bibtex_preprocess.cmake b/doc/doxygen/bibtex_preprocess.cmake new file mode 100644 index 0000000000..a8e53368dd --- /dev/null +++ b/doc/doxygen/bibtex_preprocess.cmake @@ -0,0 +1,29 @@ +# +# Copyright (C) 2019-2024 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +file(READ "${INPUT}" FILE_CONTENTS) + +# transform BibTeX DOI fields into URL fields (bibliographic styles available +# to Doxygen do not process the DOI field) +string(REGEX REPLACE + "([\r\n])[\t ]*doi *= *([\\{\"]+)(10\\.[0-9]+)" + "\\1url=\\2https://doi.org/\\3" + FILE_CONTENTS "${FILE_CONTENTS}") + +file(WRITE "${OUTPUT}" "${FILE_CONTENTS}") diff --git a/src/core/cell_system/CellStructure.hpp b/src/core/cell_system/CellStructure.hpp index 9993fffc3d..47fb58c875 100644 --- a/src/core/cell_system/CellStructure.hpp +++ b/src/core/cell_system/CellStructure.hpp @@ -557,6 +557,7 @@ struct CellStructure : public System::Leaf { * @brief Set the particle decomposition to @ref RegularDecomposition. * * @param range Interaction range. + * @param fully_connected_boundary neighbor cell directions for Lees-Edwards. */ void set_regular_decomposition( double range, From 1ef5b120bf57c32100f5198b53664655160f29d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-No=C3=ABl=20Grad?= Date: Fri, 23 Aug 2024 07:54:27 +0200 Subject: [PATCH 3/7] Fix CI timeout issues Clang with UBSAN takes 3 to 10 min to compile each Ekin kernel. GCC builds with maxset can last for more than 1 hour due to wait times when downloading Docker images, assembling the code coverage report, or uploading the job artifacts. 
--- .gitlab-ci.yml | 2 ++ .../src/electrokinetics/generated_kernels/CMakeLists.txt | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ea94df3ffd..b4c3f70734 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -223,6 +223,7 @@ cuda12-coverage: with_stokesian_dynamics: 'true' script: - bash maintainer/CI/build_cmake.sh + timeout: 90m tags: - espresso - cuda @@ -252,6 +253,7 @@ cuda12-maxset: paths: - build/ expire_in: 1 week + timeout: 90m tags: - espresso - cuda diff --git a/src/walberla_bridge/src/electrokinetics/generated_kernels/CMakeLists.txt b/src/walberla_bridge/src/electrokinetics/generated_kernels/CMakeLists.txt index d83586ea5c..b3236033ce 100644 --- a/src/walberla_bridge/src/electrokinetics/generated_kernels/CMakeLists.txt +++ b/src/walberla_bridge/src/electrokinetics/generated_kernels/CMakeLists.txt @@ -29,4 +29,11 @@ foreach(precision double_precision single_precision) FrictionCouplingKernel_${precision}.cpp FixedFlux_${precision}.cpp Dirichlet_${precision}.cpp) + # disable UBSAN on large files due to excessive compile times + set_source_files_properties( + AdvectiveFluxKernel_${precision}.cpp + DiffusiveFluxKernelWithElectrostatic_${precision}.cpp + DiffusiveFluxKernelWithElectrostaticThermalized_${precision}.cpp + TARGET_DIRECTORY espresso_walberla PROPERTIES COMPILE_FLAGS + -fno-sanitize=undefined) endforeach() From 940373f39d11b93a1703c6357590ac3cd03cf081 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-No=C3=ABl=20Grad?= Date: Fri, 23 Aug 2024 09:37:41 +0200 Subject: [PATCH 4/7] Fix checkpointing bug --- src/python/espressomd/checkpointing.py | 2 +- src/python/espressomd/system.py | 23 ++++++++++++----------- testsuite/python/save_checkpoint.py | 2 +- testsuite/python/test_checkpoint.py | 13 +++++++------ 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/python/espressomd/checkpointing.py b/src/python/espressomd/checkpointing.py index 22f0d953e7..f8ccd10674 100644 --- a/src/python/espressomd/checkpointing.py +++ b/src/python/espressomd/checkpointing.py @@ -199,7 +199,7 @@ def get_last_checkpoint_index(self): def save(self, checkpoint_index=None): """ Saves all registered python objects in the given checkpoint directory - using cPickle. + using pickle. 
""" # get attributes of registered objects diff --git a/src/python/espressomd/system.py b/src/python/espressomd/system.py index ed21a3b760..22739211f2 100644 --- a/src/python/espressomd/system.py +++ b/src/python/espressomd/system.py @@ -135,18 +135,19 @@ def __init__(self, **kwargs): if "sip" in kwargs: super().__init__(**kwargs) self._setup_atexit() - return - super().__init__(_regular_constructor=True, **kwargs) - if has_features("CUDA"): - self.cuda_init_handle = cuda_init.CudaInitHandle() - if has_features("WALBERLA"): - self._lb = None - self._ekcontainer = None - self._ase_interface = None + else: + super().__init__(_regular_constructor=True, **kwargs) + if has_features("CUDA"): + self.cuda_init_handle = cuda_init.CudaInitHandle() + if has_features("WALBERLA"): + self._lb = None + self._ekcontainer = None - # lock class - self.call_method("lock_system_creation") - self._setup_atexit() + # lock class + self.call_method("lock_system_creation") + self._setup_atexit() + + self._ase_interface = None def _setup_atexit(self): import atexit diff --git a/testsuite/python/save_checkpoint.py b/testsuite/python/save_checkpoint.py index 839687b2f4..c40e54caba 100644 --- a/testsuite/python/save_checkpoint.py +++ b/testsuite/python/save_checkpoint.py @@ -168,7 +168,7 @@ system.electrostatics.solver = p3m p3m.charge_neutrality_tolerance = 5e-12 -if "ase" in sys.modules: +if has_ase and "ase" in sys.modules: system.ase = espressomd.plugins.ase.ASEInterface( type_mapping={0: "H", 1: "O", 10: "Cl"}, ) diff --git a/testsuite/python/test_checkpoint.py b/testsuite/python/test_checkpoint.py index 45c14b47b4..1594902557 100644 --- a/testsuite/python/test_checkpoint.py +++ b/testsuite/python/test_checkpoint.py @@ -65,6 +65,7 @@ class CheckpointTest(ut.TestCase): checkpoint = espressomd.checkpointing.Checkpoint( **config.get_checkpoint_params()) checkpoint.load(0) + checkpoint.save(1) path_cpt_root = pathlib.Path(checkpoint.checkpoint_dir) @classmethod @@ -78,7 +79,7 @@ def setUpClass(cls): cls.ref_periodicity = np.array([False, False, False]) @utx.skipIfMissingFeatures(["WALBERLA"]) - @ut.skipIf(not has_lb_mode, "Skipping test due to missing LB feature.") + @ut.skipIf(not has_lb_mode, "Skipping test due to missing LB mode.") def test_lb_fluid(self): lbf = system.lb cpt_mode = 0 if 'LB.ASCII' in modes else 1 @@ -164,7 +165,7 @@ def test_lb_fluid(self): np.copy(lbf[:, :, :].is_boundary.astype(int)), 0) @utx.skipIfMissingFeatures(["WALBERLA"]) - @ut.skipIf(not has_lb_mode, "Skipping test due to missing EK feature.") + @ut.skipIf(not has_lb_mode, "Skipping test due to missing EK mode.") def test_ek_species(self): cpt_mode = 0 if 'LB.ASCII' in modes else 1 cpt_root = pathlib.Path(self.checkpoint.checkpoint_dir) @@ -269,7 +270,7 @@ def generator(value, shape): @utx.skipIfMissingFeatures(["WALBERLA"]) @ut.skipIf('LB.GPU' in modes, 'VTK not implemented for LB GPU') - @ut.skipIf(not has_lb_mode, "Skipping test due to missing LB feature.") + @ut.skipIf(not has_lb_mode, "Skipping test due to missing LB mode.") def test_lb_vtk(self): lbf = system.lb self.assertEqual(len(lbf.vtk_writers), 2) @@ -316,7 +317,7 @@ def test_lb_vtk(self): (vtk_root / filename.format(2)).unlink(missing_ok=True) @utx.skipIfMissingFeatures(["WALBERLA"]) - @ut.skipIf(not has_lb_mode, "Skipping test due to missing EK feature.") + @ut.skipIf(not has_lb_mode, "Skipping test due to missing EK mode.") def test_ek_vtk(self): ek_species = system.ekcontainer[0] vtk_suffix = config.test_name @@ -507,7 +508,7 @@ def 
test_shape_based_constraints_serialization(self): self.assertGreater(np.linalg.norm(np.copy(p3.f) - old_force), 1e6) @utx.skipIfMissingFeatures(["WALBERLA"]) - @ut.skipIf(not has_lb_mode, "Skipping test due to missing LB feature.") + @ut.skipIf(not has_lb_mode, "Skipping test due to missing LB mode.") @ut.skipIf('THERM.LB' not in modes, 'LB thermostat not in modes') def test_thermostat_LB(self): thmst = system.thermostat.lb @@ -916,7 +917,7 @@ def test_scafacos_dipoles(self): self.assertEqual(state[key], reference[key], msg=f'for {key}') def test_comfixed(self): - self.assertEqual(list(system.comfixed.types), [0, 2]) + self.assertEqual(set(system.comfixed.types), {0, 2}) @utx.skipIfMissingFeatures('COLLISION_DETECTION') def test_collision_detection(self): From a9ffeb0a27b0bf80f5d65c7e3ed2b24cf6ac85b8 Mon Sep 17 00:00:00 2001 From: phohenberger Date: Fri, 16 Aug 2024 20:22:17 +0200 Subject: [PATCH 5/7] Add new ZnDraw features --- doc/sphinx/installation.rst | 2 +- doc/sphinx/visualization.rst | 9 +- src/python/espressomd/zn.py | 165 ++++++++++++++++++++----- testsuite/scripts/importlib_wrapper.py | 2 +- 4 files changed, 145 insertions(+), 33 deletions(-) diff --git a/doc/sphinx/installation.rst b/doc/sphinx/installation.rst index 198e90d2f8..adb639a6c5 100644 --- a/doc/sphinx/installation.rst +++ b/doc/sphinx/installation.rst @@ -132,7 +132,7 @@ To install the ZnDraw visualizer: .. code-block:: bash - python3 -m pip install --user -c requirements.txt 'zndraw==0.4.5' + python3 -m pip install --user -c requirements.txt 'zndraw==0.4.6' .. _Nvidia GPU acceleration: diff --git a/doc/sphinx/visualization.rst b/doc/sphinx/visualization.rst index b421f49aca..652013b560 100644 --- a/doc/sphinx/visualization.rst +++ b/doc/sphinx/visualization.rst @@ -303,7 +303,7 @@ ZnDraw visualizer |es| supports the ZnDraw visualizer :cite:`elijosius24a` in Jupyter Notebooks. With ZnDraw [1]_, you can visualize your simulation live in a notebook or -web browser. The visualizer is based on ``THREE.js``. +web browser. .. _ZnDraw General usage: @@ -316,6 +316,9 @@ colors like ``red``, ``black`` etc., but one can also use hex colors like ``#ff0 Then write your integration loop in a separate function, and call the update function of the visualizer to capture the current state of the system and visualize it. Note that the visualizer needs to be started by pressing space. +Note that due to the need of a server running in the background, the visualizer is currently not able to run in multiple notebooks at once. +Make sure only one kernel is running the visualizer at a time. + Example code:: import espressomd @@ -351,6 +354,10 @@ as a criterium. The ``normalize`` boolean which normalizes the color to the larg ``normalize`` is false and describes the range to what the colorrange is applied to. ``scale_vector_thickness`` is a boolean and changes the thickness scaling of the vectors and ``opacity`` is a float value that sets the opacity of the vectors. +The ``ZnDraw``-object used for visualization is wrapped inside the visualizer object and is exposed by the attribute ``vis.zndraw``. + +When the ``jupyter`` visualization-window comes up as a blank page, make sure to allow third party cookies in your browser settings. 
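+
+For instance, the camera of the underlying scene can be repositioned through ``vis.zndraw``
+(a minimal sketch, assuming the ZnDraw version pinned in the requirements; the box length
+and camera coordinates are placeholders)::
+
+    import espressomd
+    import espressomd.zn
+
+    system = espressomd.System(box_l=[10., 10., 10.])
+    vis = espressomd.zn.Visualizer(system)
+    vis.zndraw.camera = {"position": [5., 5., 25.], "target": [5., 5., 5.]}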
+ An example code snippet containing the :class:`~espressomd.zn.LBField` object:: import espressomd.zn diff --git a/src/python/espressomd/zn.py b/src/python/espressomd/zn.py index 7cb887cd5c..9520664230 100644 --- a/src/python/espressomd/zn.py +++ b/src/python/espressomd/zn.py @@ -29,6 +29,7 @@ import time import urllib.parse import typing as t +import scipy.spatial.transform # Standard colors @@ -417,10 +418,13 @@ class Visualizer(): """ + SERVER_PORT = None + SOCKET_PORT = None + def __init__(self, system: espressomd.system.System = None, port: int = 1234, - token: str = secrets.token_hex(4), + token: str = None, folded: bool = True, colors: dict = None, radii: dict = None, @@ -440,13 +444,18 @@ def __init__(self, self.url = "http://127.0.0.1" self.frame_count = 0 - self.port = port - self.token = token + if token is None: + self.token = secrets.token_hex(4) + else: + self.token = token # A server is started in a subprocess, and we have to wait for it - print("Starting ZnDraw server, this may take a few seconds") - self._start_server() - time.sleep(10) + if self.SERVER_PORT is None: + print("Starting ZnDraw server, this may take a few seconds") + self.port = port + self._start_server() + time.sleep(10) + self._start_zndraw() time.sleep(1) @@ -470,9 +479,8 @@ def __init__(self, if jupyter: self._show_jupyter() else: - # Problems with server being terminated at the end of the script - raise NotImplementedError("Only Jupyter is supported for now") - # webbrowser.open_new_tab(self.address) + raise NotImplementedError( + "Only Jupyter notebook is supported at the moment") def _start_server(self): """ @@ -480,6 +488,9 @@ def _start_server(self): """ self.socket_port = zndraw.utils.get_port(default=6374) + Visualizer.SERVER_PORT = self.port + Visualizer.SOCKET_PORT = self.socket_port + self.server = subprocess.Popen(["zndraw", "--no-browser", f"--port={self.port}", f"--storage-port={self.socket_port}"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL @@ -500,12 +511,12 @@ def _start_zndraw(self): while True: try: self.r = znsocket.Client( - address=f"{self.url}:{self.socket_port}") + address=f"{self.url}:{self.SOCKET_PORT}") break except BaseException: time.sleep(0.5) - url = f"{self.url}:{self.port}" + url = f"{self.url}:{self.SERVER_PORT}" self.zndraw = zndraw.zndraw.ZnDrawLocal( r=self.r, url=url, token=self.token, timeout=config) parsed_url = urllib.parse.urlparse( @@ -532,16 +543,28 @@ def update(self): ) # Catch when the server is initializing an empty frame - if self.frame_count == 0 and len(self.zndraw) > 0: - self.zndraw.__setitem__(0, data) - else: + # len(self.zndraw) is a expensive socket call, so we try to avoid it + if self.frame_count != 0 or len(self.zndraw) == 0: self.zndraw.append(data) + else: + self.zndraw.__setitem__(0, data) - if self.params["vector_field"] is not None and self.frame_count == 0: + if self.frame_count == 0: self.zndraw.socket.sleep(1) - for key, value in self.arrow_config.items(): - setattr(self.zndraw.config.arrows, key, value) + x = self.system.box_l[0] / 2 + y = self.system.box_l[1] / 2 + z = self.system.box_l[2] / 2 + + z_dist = max([1.5 * y, 1.5 * x, 1.5 * z]) + + self.zndraw.camera = {'position': [ + x, y, z_dist], 'target': [x, y, z]} + self.zndraw.config.scene.frame_update = False + + if self.params["vector_field"] is not None: + for key, value in self.arrow_config.items(): + setattr(self.zndraw.config.arrows, key, value) self.frame_count += 1 @@ -580,18 +603,40 @@ def draw_constraints(self, shapes: list): dist = shape.dist normal = 
np.array(shape.normal) - rotation_angles = zndraw.utils.direction_to_euler(normal) - - position = (dist * normal).tolist() - # Not optimal, but ensures its always larger than the box. - wall_width = wall_height = 2 * max(self.system.box_l) - - objects.append(zndraw.draw.Plane( - position=position, - rotation=rotation_angles, - width=wall_width, - height=wall_height, - material=mat)) + position = dist * normal + helper = WallIntersection( + plane_normal=normal, plane_point=position, box_l=self.system.box_l) + corners = helper.get_intersections() + + base_position = np.copy(corners[0]) + corners -= base_position + + # Rotate plane to align with z-axis, Custom2DShape only works + # in the xy-plane + unit_z = np.array([0, 0, 1]) + r, _ = scipy.spatial.transform.Rotation.align_vectors( + [unit_z], [normal]) + rotated_corners = r.apply(corners) + + # Sort corners in a clockwise order, except the first corner + angles = np.arctan2( + rotated_corners[1:, 1], rotated_corners[1:, 0]) + sorted_indices = np.argsort(angles) + sorted_corners = rotated_corners[1:][sorted_indices] + sorted_corners = np.vstack( + [rotated_corners[0], sorted_corners])[:, :2] + + r, _ = scipy.spatial.transform.Rotation.align_vectors( + [normal], [unit_z]) + euler_angles = r.as_euler("xyz") + + # invert the z-axis, unsure why this is needed, maybe + # different coordinate systems + euler_angles[2] *= -1. + + objects.append(zndraw.draw.Custom2DShape( + position=base_position, rotation=euler_angles, + points=sorted_corners, material=mat)) elif shape_type == "Sphere": center = shape.center @@ -621,4 +666,64 @@ def draw_constraints(self, shapes: list): raise NotImplementedError( f"Shape of type {shape_type} isn't available in ZnDraw") - self.zndraw.geometries = objects + self.zndraw.geometries = objects + + +class WallIntersection: + """ + Simple helper to calculate all Box edges that intersect with a plane. 
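+    The plane is intersected with each of the twelve box edges; the resulting
+    points are returned by get_intersections() and serve as the corners of the
+    wall polygon drawn by draw_constraints().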
+ """ + + def __init__(self, plane_point, plane_normal, box_l): + self.plane_point = plane_point + self.plane_normal = plane_normal / np.linalg.norm(plane_normal) + self.box_l = box_l + + # Create 8 vertices of the bounding box + self.vertices = np.array([ + [0, 0, 0], + [0, 0, box_l[2]], + [0, box_l[1], 0], + [0, box_l[1], box_l[2]], + [box_l[0], 0, 0], + [box_l[0], 0, box_l[2]], + [box_l[0], box_l[1], 0], + [box_l[0], box_l[1], box_l[2]] + ]) + + self.edges = [ + (self.vertices[0], self.vertices[1]), + (self.vertices[0], self.vertices[2]), + (self.vertices[0], self.vertices[4]), + (self.vertices[1], self.vertices[3]), + (self.vertices[1], self.vertices[5]), + (self.vertices[2], self.vertices[3]), + (self.vertices[2], self.vertices[6]), + (self.vertices[3], self.vertices[7]), + (self.vertices[4], self.vertices[5]), + (self.vertices[4], self.vertices[6]), + (self.vertices[5], self.vertices[7]), + (self.vertices[6], self.vertices[7]) + ] + + def plane_intersection_with_line(self, line_point1, line_point2): + # Calculate the intersection point of a line and a plane + line_dir = line_point2 - line_point1 + denom = np.dot(self.plane_normal, line_dir) + + if np.abs(denom) > 1e-6: # Avoid division by zero + t = np.dot(self.plane_normal, + (self.plane_point - line_point1)) / denom + if 0 <= t <= 1: + return line_point1 + t * line_dir + return None + + def get_intersections(self): + intersections = [] + + for edge in self.edges: + intersection = self.plane_intersection_with_line(edge[0], edge[1]) + if intersection is not None: + intersections.append(intersection) + + return np.array(intersections) diff --git a/testsuite/scripts/importlib_wrapper.py b/testsuite/scripts/importlib_wrapper.py index f3dd8e5b44..5bc879338c 100644 --- a/testsuite/scripts/importlib_wrapper.py +++ b/testsuite/scripts/importlib_wrapper.py @@ -394,7 +394,7 @@ class GetEspressomdVisualizerImports(ast.NodeVisitor): """ def __init__(self): - self.visualizers = {"visualization"} + self.visualizers = {"visualization", "zn"} self.namespace_visualizers = { "espressomd." 
+ x for x in self.visualizers} self.visu_items = {} From cfa136a22cb73fb764a6b99b3b5d9653b45c33de Mon Sep 17 00:00:00 2001 From: Rebecca Stephan Date: Mon, 12 Aug 2024 13:13:10 +0200 Subject: [PATCH 6/7] Add Zndraw in tutorials --- doc/tutorials/constant_pH/constant_pH.ipynb | 44 ++++- .../electrodes/electrodes_part2.ipynb | 66 +++++-- .../ferrofluid/ferrofluid_part1.ipynb | 174 ++---------------- .../ferrofluid/ferrofluid_part2.ipynb | 85 +-------- .../ferrofluid/ferrofluid_part3.ipynb | 45 ++++- .../langevin_dynamics/langevin_dynamics.ipynb | 74 +++++++- .../raspberry_electrophoresis.ipynb | 52 ++++++ 7 files changed, 277 insertions(+), 263 deletions(-) diff --git a/doc/tutorials/constant_pH/constant_pH.ipynb b/doc/tutorials/constant_pH/constant_pH.ipynb index e147a6c3bb..8dabb3dc2b 100644 --- a/doc/tutorials/constant_pH/constant_pH.ipynb +++ b/doc/tutorials/constant_pH/constant_pH.ipynb @@ -153,6 +153,7 @@ "import espressomd.electrostatics\n", "import espressomd.reaction_methods\n", "import espressomd.polymer\n", + "import espressomd.zn\n", "from espressomd.interactions import HarmonicBond" ] }, @@ -569,7 +570,7 @@ "\n", "# add thermostat and short integration to let the system relax further\n", "system.thermostat.set_langevin(kT=KT_REDUCED, gamma=1.0, seed=7)\n", - "system.integrator.run(steps=1000)\n", + "system.integrator.run(1000)\n", "\n", "if USE_ELECTROSTATICS:\n", " COULOMB_PREFACTOR=BJERRUM_LENGTH_REDUCED * KT_REDUCED\n", @@ -739,6 +740,40 @@ "outputs": [], "source": [] }, + { + "cell_type": "markdown", + "id": "cd417295-bab3-46a5-a574-fc60d06371a5", + "metadata": {}, + "source": [ + "We will initialize the zndraw visualizer to visualize the simulation for increasing pH value:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "04dd303c", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "color = {TYPES[\"HA\"]: \"#7fc454\", #green\n", + " TYPES[\"A\"]: \"#225204\", #dark green\n", + " TYPES[\"B\"]: \"#fca000\", #orange\n", + " TYPES[\"Na\"]: \"#ff0000\", #red\n", + " TYPES[\"Cl\"]: \"#030ffc\" #blue\n", + " }\n", + "radii = {TYPES[\"HA\"]: 2,\n", + " TYPES[\"A\"]: 2,\n", + " TYPES[\"B\"]: 2,\n", + " TYPES[\"Na\"]: 2,\n", + " TYPES[\"Cl\"]: 2\n", + " }\n", + "\n", + "vis = espressomd.zn.Visualizer(system, colors=color, radii=radii)\n", + "vis.update()" + ] + }, { "cell_type": "markdown", "id": "07daa583", @@ -778,11 +813,14 @@ "outputs": [], "source": [ "# SOLUTION CELL\n", - "def perform_sampling(type_A, num_samples, num_As:np.ndarray, reaction_steps, \n", + "def perform_sampling(type_A, num_samples, num_As:np.ndarray, reaction_steps,\n", " prob_integration=0.5, integration_steps=1000):\n", " for i in range(num_samples):\n", " if USE_WCA and np.random.random() < prob_integration:\n", - " system.integrator.run(integration_steps)\n", + " for _ in range(integration_steps):\n", + " system.integrator.run(1)\n", + " global vis\n", + " vis.update()\n", " # we should do at least one reaction attempt per reactive particle\n", " RE.reaction(steps=reaction_steps)\n", " num_As[i] = system.number_of_particles(type=type_A)" diff --git a/doc/tutorials/electrodes/electrodes_part2.ipynb b/doc/tutorials/electrodes/electrodes_part2.ipynb index 4cae3ebcd5..1aaf34d86e 100644 --- a/doc/tutorials/electrodes/electrodes_part2.ipynb +++ b/doc/tutorials/electrodes/electrodes_part2.ipynb @@ -214,6 +214,7 @@ "import espressomd.observables\n", "import espressomd.accumulators\n", "import espressomd.shapes\n", + "import espressomd.zn\n", "\n", 
"espressomd.assert_features(['WCA', 'ELECTROSTATICS'])\n", "rng = np.random.default_rng(42)\n", @@ -476,20 +477,20 @@ "source": [ "# SOLUTION CELL\n", "offset = LJ_SIGMA # avoid unfavorable overlap at close distance to the walls\n", - "init_part_btw_z1 = offset \n", + "init_part_btw_z1 = offset\n", "init_part_btw_z2 = box_l_z - offset\n", - "ion_pos = np.empty((3), dtype=float)\n", + "ion_pos = np.empty((3,), dtype=float)\n", "\n", - "for i in range (N_IONPAIRS):\n", - " ion_pos[0] = rng.random(1) * system.box_l[0]\n", - " ion_pos[1] = rng.random(1) * system.box_l[1]\n", - " ion_pos[2] = rng.random(1) * (init_part_btw_z2 - init_part_btw_z1) + init_part_btw_z1\n", + "for i in range(N_IONPAIRS):\n", + " ion_pos[0] = rng.random(1)[0] * system.box_l[0]\n", + " ion_pos[1] = rng.random(1)[0] * system.box_l[1]\n", + " ion_pos[2] = rng.random(1)[0] * (init_part_btw_z2 - init_part_btw_z1) + init_part_btw_z1\n", " system.part.add(pos=ion_pos, type=types[\"Cation\"], q=charges[\"Cation\"])\n", - " \n", - "for i in range (N_IONPAIRS):\n", - " ion_pos[0] = rng.random(1) * system.box_l[0]\n", - " ion_pos[1] = rng.random(1) * system.box_l[1]\n", - " ion_pos[2] = rng.random(1) * (init_part_btw_z2 - init_part_btw_z1) + init_part_btw_z1\n", + "\n", + "for i in range(N_IONPAIRS):\n", + " ion_pos[0] = rng.random(1)[0] * system.box_l[0]\n", + " ion_pos[1] = rng.random(1)[0] * system.box_l[1]\n", + " ion_pos[2] = rng.random(1)[0] * (init_part_btw_z2 - init_part_btw_z1) + init_part_btw_z1\n", " system.part.add(pos=ion_pos, type=types[\"Anion\"], q=charges[\"Anion\"])" ] }, @@ -1008,7 +1009,40 @@ "\n", "With the above knowledge, we can now assess the \n", "differential capacitance of the system, by changing the applied voltage\n", - "difference and determining the corresponding surface charge density." + "difference and determining the corresponding surface charge density.\n", + "We will use the ZnDraw visualizer to visualize our system:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef5d908f-a7f0-4845-9555-34822128c141", + "metadata": {}, + "outputs": [], + "source": [ + "color = {\n", + " types[\"Cation\"]: \"#ff0000\", #red\n", + " types[\"Anion\"]: \"#030ffc\" #blue\n", + " }\n", + " \n", + "radii = {\n", + " types[\"Cation\"]: LJ_SIGMA,\n", + " types[\"Anion\"]: LJ_SIGMA\n", + " }\n", + "\n", + "vis = espressomd.zn.Visualizer(system, colors=color, radii=radii)\n", + "#vis.draw_constraints([floor, ceiling])\n", + "\n", + "#note: The system displayed is the enlarged system as you can see the ELG-GAP along the nonperiodic direction. The particles are only in the smaller subsystem.\n", + "#note: The particles are shown bigger for visualization purpose." 
+ ] + }, + { + "cell_type": "markdown", + "id": "21da58e8-e666-4d06-8538-a8d6af521148", + "metadata": {}, + "source": [ + "Do the sampling from high to low potential:" ] }, { @@ -1029,7 +1063,9 @@ "for potential_diff in tqdm.tqdm(np.linspace(MIN_PHI, MAX_PHI, N_PHI)[::-1]):\n", " system.auto_update_accumulators.clear()\n", " system.electrostatics.solver = setup_electrostatic_solver(potential_diff)\n", - " system.integrator.run(N_SAMPLES_EQUIL_CAP * STEPS_PER_SAMPLE)\n", + " for i in range(N_SAMPLES_EQUIL_CAP * STEPS_PER_SAMPLE//50):\n", + " system.integrator.run(50)\n", + " vis.update()\n", " sigmas = []\n", "\n", " for tm in range(N_SAMPLES_CAP):\n", @@ -1039,7 +1075,9 @@ " system.auto_update_accumulators.clear()\n", " system.auto_update_accumulators.add(density_accumulator_cation)\n", " system.auto_update_accumulators.add(density_accumulator_anion)\n", - " system.integrator.run(STEPS_PER_SAMPLE)\n", + " for j in range(int(STEPS_PER_SAMPLE/50)):\n", + " system.integrator.run(50)\n", + " vis.update()\n", "\n", " cation_profile_mean = density_accumulator_cation.mean()[0, 0, :]\n", " anion_profile_mean = density_accumulator_anion.mean()[0, 0, :]\n", diff --git a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb index d65111cce2..2bd4d2ad56 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb @@ -257,6 +257,7 @@ "import espressomd.magnetostatics\n", "import espressomd.cluster_analysis\n", "import espressomd.pair_criteria\n", + "import espressomd.zn\n", "\n", "espressomd.assert_features(['DIPOLES', 'DP3M', 'LENNARD_JONES'])\n", "\n", @@ -607,7 +608,7 @@ "id": "a6c4c46b", "metadata": {}, "source": [ - "## Sampling" + "## Sampling and cluster analysis" ] }, { @@ -615,6 +616,8 @@ "id": "af0906f5", "metadata": {}, "source": [ + "To quantify the number of clusters and their respective sizes, we now want to perform a cluster analysis.\n", + "For that we can use ESPREsSo's [cluster analysis class](https://espressomd.github.io/doc/analysis.html?highlight=cluster%20analysis#cluster-analysis).\n", "The system will be sampled over 100 loops." ] }, @@ -628,159 +631,6 @@ "LOOPS = 100" ] }, - { - "cell_type": "markdown", - "id": "516c2f33", - "metadata": {}, - "source": [ - "As the system is two dimensional, we can simply do a scatter plot to get a visual representation of a system state. To get a better insight of how a ferrofluid system develops during time we will create a video of the development of our system during the sampling. If you only want to sample the system simply go to [Sampling without animation](#Sampling-without-animation)" - ] - }, - { - "cell_type": "markdown", - "id": "60dc1653", - "metadata": {}, - "source": [ - "### Sampling with animation" - ] - }, - { - "cell_type": "markdown", - "id": "5f7fe3a7", - "metadata": {}, - "source": [ - "To get an animation of the system development we have to create a function which will save the video and embed it in an html string." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed0ee691", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.animation as animation\n", - "import tempfile\n", - "import base64\n", - "\n", - "VIDEO_TAG = \"\"\"\"\"\"\n", - "\n", - "\n", - "def anim_to_html(anim):\n", - " if not hasattr(anim, '_encoded_video'):\n", - " with tempfile.NamedTemporaryFile(suffix='.mp4') as f:\n", - " anim.save(f.name, fps=20, extra_args=['-vcodec', 'libx264'])\n", - " with open(f.name, \"rb\") as g:\n", - " video = g.read()\n", - " anim._encoded_video = base64.b64encode(video).decode('ascii')\n", - " plt.close(anim._fig)\n", - " return VIDEO_TAG.format(anim._encoded_video)\n", - "\n", - "\n", - "animation.Animation._repr_html_ = anim_to_html\n", - "\n", - "\n", - "def init():\n", - " # Set x and y range\n", - " ax.set_ylim(0, BOX_SIZE)\n", - " ax.set_xlim(0, BOX_SIZE)\n", - " x_data, y_data = [], []\n", - " part.set_data(x_data, y_data)\n", - " return part," - ] - }, - { - "cell_type": "markdown", - "id": "89d9844a", - "metadata": {}, - "source": [ - "## Exercise:\n", - "\n", - "In the following an animation loop is defined, however it is incomplete.\n", - "Extend the code such that in every loop the system is integrated for 100 steps.\n", - "Afterwards `x_data` and `y_data` have to be populated by the folded $x$- and $y$- positions of the particles.\n", - "\n", - "(You may copy and paste the incomplete code template to the empty cell below.)\n", - "\n", - "```python\n", - "def run(i):\n", - " # < excercise >\n", - " \n", - " # Save current system state as a plot\n", - " x_data, y_data = # < excercise >\n", - " ax.figure.canvas.draw()\n", - " part.set_data(x_data, y_data)\n", - " print(f'progress: {(i + 1) * 100. / LOOPS:3.0f}%', end='\\r')\n", - " return part,\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "91004d62", - "metadata": {}, - "outputs": [], - "source": [ - "# SOLUTION CELL\n", - "def run(i):\n", - " system.integrator.run(100)\n", - "\n", - " # Save current system state as a plot\n", - " x_data, y_data = system.part.all().pos_folded[:, 0], system.part.all().pos_folded[:, 1]\n", - " ax.figure.canvas.draw()\n", - " part.set_data(x_data, y_data)\n", - " print(f'progress: {(i + 1) * 100. / LOOPS:3.0f}%', end='\\r')\n", - " return part," - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "95caca0c", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "id": "0f7e9093", - "metadata": {}, - "source": [ - "Now we use the animation class of matplotlib to save snapshots of the system as frames of a video which is then displayed after the sampling is finished. Between two frames are 100 integration steps.\n", - "\n", - "In the video chain-like and ring-like clusters should be visible, as well as some isolated monomers." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3d11e830", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "part, = ax.plot([], [], 'o')\n", - "\n", - "animation.FuncAnimation(fig, run, frames=LOOPS, blit=True, interval=0, repeat=False, init_func=init)" - ] - }, - { - "cell_type": "markdown", - "id": "a7ab793c", - "metadata": {}, - "source": [ - "## Cluster analysis\n", - "\n", - "To quantify the number of clusters and their respective sizes, we now want to perform a cluster analysis.\n", - "For that we can use ESPREsSo's [cluster analysis class](https://espressomd.github.io/doc/analysis.html?highlight=cluster%20analysis#cluster-analysis)." - ] - }, { "cell_type": "markdown", "id": "2f6d201a", @@ -847,15 +697,19 @@ "id": "6b8d8e7f", "metadata": {}, "source": [ - "### Sampling without animation" + "We initialize an instance of the ZnDraw visualizer to animate the sampling. As our system is constrained to 2D, the particles are on a single face of the cubic system box." ] }, { - "cell_type": "markdown", + "cell_type": "code", "id": "2098e033", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "The following code just samples the system and does a cluster analysis every loops (100 by default) simulation steps." + "color = {0: \"#7fc454\"} \n", + "radii = {0: LJ_SIGMA/2}\n", + "vis = espressomd.zn.Visualizer(system, colors=color, radii=radii)" ] }, { @@ -878,6 +732,7 @@ " for c in cluster_structure.clusters:\n", " cluster_sizes.append(# < excercise >)\n", " system.integrator.run(100)\n", + " vis.update()\n", "```" ] }, @@ -899,7 +754,8 @@ " n_clusters.append(len(cluster_structure.clusters))\n", " for c in cluster_structure.clusters:\n", " cluster_sizes.append(c[1].size())\n", - " system.integrator.run(100)" + " system.integrator.run(100)\n", + " vis.update()" ] }, { @@ -915,7 +771,7 @@ "id": "0042827e", "metadata": {}, "source": [ - "You may want to get a visualization of the current state of the system. For that we plot the particle positions folded to the simulation box using matplotlib." + "You may want to get a 2D visualization of the current state of the system. For that we plot the particle positions folded to the simulation box using matplotlib." ] }, { diff --git a/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb index 453d5f9e97..e13eb759f9 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb @@ -59,6 +59,7 @@ "source": [ "import espressomd\n", "import espressomd.magnetostatics\n", + "import espressomd.zn\n", "\n", "espressomd.assert_features(['DIPOLES', 'DP3M', 'LENNARD_JONES'])\n", "\n", @@ -305,95 +306,27 @@ "plt.show()" ] }, - { - "cell_type": "markdown", - "id": "2f52d782", - "metadata": {}, - "source": [ - "## Video of the development of the system" - ] - }, { "cell_type": "markdown", "id": "eb06ab55", "metadata": {}, "source": [ - "You may want to get an insight of how the system develops in time. Thus we now create a function which will save a video and embed it in an html string to create a video of the systems development " + "You may want to get an insight of how the system develops in time. 
We use the ZnDraw visualiser:" ] }, { "cell_type": "code", "execution_count": null, - "id": "e25ba03b", + "id": "a6807ebd-2f97-476c-94e1-ea5fe1d383a4", "metadata": {}, "outputs": [], "source": [ - "import matplotlib.pyplot as plt\n", - "import matplotlib.animation as animation\n", - "import tempfile\n", - "import base64\n", - "\n", - "VIDEO_TAG = \"\"\"\"\"\"\n", - "\n", - "\n", - "def anim_to_html(anim):\n", - " if not hasattr(anim, '_encoded_video'):\n", - " with tempfile.NamedTemporaryFile(suffix='.mp4') as f:\n", - " anim.save(f.name, fps=20, extra_args=['-vcodec', 'libx264'])\n", - " with open(f.name, \"rb\") as g:\n", - " video = g.read()\n", - " anim._encoded_video = base64.b64encode(video).decode('ascii')\n", - " plt.close(anim._fig)\n", - " return VIDEO_TAG.format(anim._encoded_video)\n", - "\n", - "\n", - "animation.Animation._repr_html_ = anim_to_html\n", - "\n", - "\n", - "def init():\n", - " # Set x and y range\n", - " ax.set_ylim(0, box_size)\n", - " ax.set_xlim(0, box_size)\n", - " xdata, ydata = [], []\n", - " part.set_data(xdata, ydata)\n", - " return part,\n", - "\n", - "\n", - "def run(i):\n", + "color = {0: \"#7fc454\"} \n", + "radii = {0: LJ_SIGMA/2}\n", + "vis = espressomd.zn.Visualizer(system, colors=color, radii=radii)\n", + "for _ in range(100):\n", " system.integrator.run(50)\n", - "\n", - " # Save current system state as a plot\n", - " xdata, ydata = particles.pos_folded[:, 0], particles.pos_folded[:, 1]\n", - " ax.figure.canvas.draw()\n", - " part.set_data(xdata, ydata)\n", - " print(f'progress: {(i + 1):3.0f}%', end='\\r')\n", - " return part," - ] - }, - { - "cell_type": "markdown", - "id": "25b6af88", - "metadata": {}, - "source": [ - "We now can start the sampling over the animation class of matplotlib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8c9fe146", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "part, = ax.plot([], [], 'o')\n", - "\n", - "animation.FuncAnimation(fig, run, frames=100, blit=True, interval=0, repeat=False, init_func=init)" + " vis.update()" ] }, { @@ -401,7 +334,7 @@ "id": "278cfc7f", "metadata": {}, "source": [ - "In the visualization video we can see that the single chains break and connect to each other during time. Also some monomers are present which break from and connect to chains. If you want to have some more frames, i.e. a longer video, just adjust the frames parameter in FuncAnimation." + "In the visualization video we can see that the single chains break and connect to each other during time. Also some monomers are present which break from and connect to chains." ] }, { diff --git a/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb index b7f896f46a..8bce1733db 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb @@ -134,6 +134,7 @@ "source": [ "import espressomd\n", "import espressomd.magnetostatics\n", + "import espressomd.zn\n", "\n", "espressomd.assert_features(['DIPOLES', 'DP3M', 'LENNARD_JONES'])\n", "\n", @@ -536,6 +537,14 @@ "As in **part II** we use the same system for every value of the Langevin parameter $\\alpha$. Thus we use that the system is already pre-equilibrated from the previous run so we save some equilibration time. For scientific purposes one would use a new system for every value for the Langevin parameter to ensure that the systems are independent and no correlation effects are measured. 
Also one would perform more than just one simulation for each value of $\\alpha$ to increase the precision of the results." ] }, + { + "cell_type": "markdown", + "id": "395a4fc0-baaf-48d6-8b88-b8ba42408bfe", + "metadata": {}, + "source": [ + "Additionally, we will visualize the sampling of the magnetization curve using the ZnDraw visualizer. Note, that the magnetic field is increasing during the video." + ] + }, { "cell_type": "code", "execution_count": null, @@ -579,7 +588,8 @@ " magnetizations[ndx] = np.mean(magn_temp)\n", "\n", " # remove constraint\n", - " system.constraints.clear()" + " if not alpha == alphas[-1]:\n", + " system.constraints.clear()" ] }, { @@ -668,6 +678,39 @@ "At sufficiently high volume fractions $\\phi$ and dipolar interaction parameters $\\lambda$ these clusters can be so rigid, that simulations with normal methods are impossible as the relaxation times exceeds normal simulation times by far, resulting in strongly correlated configurations and thus measurements." ] }, + { + "cell_type": "markdown", + "id": "d2333fe6-c427-48b6-92f0-d946f4084b30", + "metadata": {}, + "source": [ + "## Visualization" + ] + }, + { + "cell_type": "markdown", + "id": "7fd720f4-858e-4d2e-8d4a-23644ab996e3", + "metadata": {}, + "source": [ + "Finally, we use ZnDraw to visualize the evolving system for high $\\alpha$. We just need to continue running our system as it is already warmed up for high magnetic field." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8e1d8ade-efa9-4d73-a328-27b0e63993b4", + "metadata": {}, + "outputs": [], + "source": [ + "# initializing zndraw visualizer\n", + "color = {0: \"#7fc454\"} \n", + "radii = {0: lj_sigma/2}\n", + "vis = espressomd.zn.Visualizer(system, colors=color, radii=radii)\n", + "\n", + "for _ in range(400):\n", + " system.integrator.run(5)\n", + " vis.update()" + ] + }, { "cell_type": "markdown", "id": "81e06b9b", diff --git a/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb b/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb index 804725d179..5f6471c40b 100644 --- a/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb +++ b/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb @@ -159,14 +159,6 @@ "## 2. Simulating Brownian motion" ] }, - { - "cell_type": "markdown", - "id": "c38456fa", - "metadata": {}, - "source": [ - "We will simulate the diffusion of a single particle that is coupled to an implicit solvent." - ] - }, { "cell_type": "code", "execution_count": null, @@ -181,6 +173,7 @@ "import espressomd\n", "import espressomd.accumulators\n", "import espressomd.observables\n", + "import espressomd.zn\n", "\n", "logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n", "\n", @@ -188,15 +181,76 @@ "KT = 1.1\n", "STEPS = 1000000\n", "\n", + "gammas = [1.0, 2.0, 4.0, 10.0]" + ] + }, + { + "cell_type": "markdown", + "id": "c38456fa", + "metadata": {}, + "source": [ + "We will simulate the diffusion of a single particle that is coupled to an implicit solvent. First, we have to setup our system." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45a6ad79-fae4-4bd7-b041-012b193b83bf", + "metadata": {}, + "outputs": [], + "source": [ "# System setup\n", "system = espressomd.System(box_l=[16] * 3)\n", "system.time_step = 0.01\n", "system.cell_system.skin = 0.4\n", "\n", - "particle = system.part.add(pos=[0, 0, 0])\n", + "particle = system.part.add(pos=[0, 0, 0])" + ] + }, + { + "cell_type": "markdown", + "id": "3d594176-bd64-4ce8-9e25-f0efefbf996c", + "metadata": {}, + "source": [ + "In order to get an idea of the Brownian motion, we will visualize our system evolving for a few steps with the ZnDraw visualizer:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80788a7f-4bf2-4fa6-9292-5de8b2a926ff", + "metadata": {}, + "outputs": [], + "source": [ + "system.thermostat.set_langevin(kT=KT, gamma=gammas[0], seed=42)\n", "\n", + "# Setup ZnDraw visualizer\n", + "color = {0: \"#7fc454\"} \n", + "radii = {0: 1}\n", + "vis = espressomd.zn.Visualizer(system, colors=color, radii=radii)\n", + "\n", + "system.integrator.run(1000)\n", + "for _ in range(200):\n", + " system.integrator.run(50)\n", + " vis.update()" + ] + }, + { + "cell_type": "markdown", + "id": "933cea77-4bca-47a6-8a82-cc13d07ec673", + "metadata": {}, + "source": [ + "Now we can do the actual simulation where we simulate for different friction coefficients and sample our observables:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bf997a0a", + "metadata": {}, + "outputs": [], + "source": [ "# Run for different friction coefficients\n", - "gammas = [1.0, 2.0, 4.0, 10.0]\n", "tau_results = []\n", "msd_results = []\n", "vacf_results = []\n", diff --git a/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb b/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb index 1d91441855..a72c35294e 100644 --- a/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb +++ b/doc/tutorials/raspberry_electrophoresis/raspberry_electrophoresis.ipynb @@ -71,6 +71,7 @@ "import espressomd.interactions\n", "import espressomd.electrostatics\n", "import espressomd.lb\n", + "import espressomd.zn\n", "\n", "import sys\n", "import tqdm\n", @@ -683,6 +684,56 @@ "system.thermostat.set_lb(LB_fluid=lb, seed=123, gamma=20.0)" ] }, + { + "cell_type": "markdown", + "id": "a7388457-8c1b-4839-bfc6-e34778ae9511", + "metadata": {}, + "source": [ + "## Visualization" + ] + }, + { + "cell_type": "markdown", + "id": "f3deb299-de31-4901-8f65-0ff06112232c", + "metadata": {}, + "source": [ + "We use the ZnDraw visualizer and run our simulation for a few steps in order to get an idea what is happening in our system:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c24f4ee4-2a6a-46ff-ba89-4f6c7f7fb227", + "metadata": {}, + "outputs": [], + "source": [ + "# Initializing ZnDraw visualizer\n", + "colors = {\n", + " TYPE_CENTRAL: \"#fca000\", #orange\n", + " TYPE_SURFACE: \"#7fc454\", #green\n", + " TYPE_CATIONS: \"#ff0000\", #red\n", + " TYPE_ANIONS: \"#030ffc\" #blue\n", + " }\n", + "radii = {\n", + " TYPE_CENTRAL: sig_ss/2,\n", + " TYPE_SURFACE: sig_ss/2,\n", + " TYPE_CATIONS: sig_ss/2,\n", + " TYPE_ANIONS: sig_ss/2\n", + " }\n", + "arrows_config = {'colormap': [[-0.5, 0.9, 0.5], [-0.0, 0.9, 0.5]],\n", + " 'normalize': True,\n", + " 'colorrange': [0, 1],\n", + " 'scale_vector_thickness': True,\n", + " 'opacity': 1.0}\n", + "\n", + "lbfield = espressomd.zn.LBField(system, step_x=2, step_y=2, step_z=5, scale=0.3)\n", + "vis = 
espressomd.zn.Visualizer(system, colors=colors, radii=radii, folded=True, vector_field=lbfield)\n", + "\n", + "for _ in range(50):\n", + " system.integrator.run(10)\n", + " vis.update()" + ] + }, { "cell_type": "markdown", "id": "f848c501", @@ -709,6 +760,7 @@ "with open('posVsTime.dat', 'w') as f: # file where the raspberry trajectory will be written to\n", " for _ in tqdm.tqdm(range(num_iterations)):\n", " system.integrator.run(num_steps_per_iteration)\n", + " vis.update()\n", " pos = central_part.pos - initial_pos\n", " f.write(f\"{system.time:.2f} {pos[0]:.4f} {pos[1]:.4f} {pos[2]:.4f}\\n\")\n", "\n", From efacee2da2bc083b56911d3e310fd45b69cc5398 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-No=C3=ABl=20Grad?= Date: Fri, 23 Aug 2024 19:48:16 +0200 Subject: [PATCH 7/7] Edit and format tutorials --- doc/tutorials/constant_pH/constant_pH.ipynb | 4 +- .../electrodes/electrodes_part2.ipynb | 7 +- .../ferrofluid/ferrofluid_part1.ipynb | 74 +++++++++---------- .../ferrofluid/ferrofluid_part2.ipynb | 22 +++--- .../ferrofluid/ferrofluid_part3.ipynb | 8 +- .../langevin_dynamics/langevin_dynamics.ipynb | 2 +- .../widom_insertion/widom_insertion.ipynb | 12 ++- 7 files changed, 69 insertions(+), 60 deletions(-) diff --git a/doc/tutorials/constant_pH/constant_pH.ipynb b/doc/tutorials/constant_pH/constant_pH.ipynb index 8dabb3dc2b..0c52dde352 100644 --- a/doc/tutorials/constant_pH/constant_pH.ipynb +++ b/doc/tutorials/constant_pH/constant_pH.ipynb @@ -745,7 +745,7 @@ "id": "cd417295-bab3-46a5-a574-fc60d06371a5", "metadata": {}, "source": [ - "We will initialize the zndraw visualizer to visualize the simulation for increasing pH value:" + "We will initialize ZnDraw to visualize the simulation for increasing pH values:" ] }, { @@ -753,7 +753,7 @@ "execution_count": null, "id": "04dd303c", "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [], "source": [ diff --git a/doc/tutorials/electrodes/electrodes_part2.ipynb b/doc/tutorials/electrodes/electrodes_part2.ipynb index 1aaf34d86e..e7007c62e3 100644 --- a/doc/tutorials/electrodes/electrodes_part2.ipynb +++ b/doc/tutorials/electrodes/electrodes_part2.ipynb @@ -1010,7 +1010,7 @@ "With the above knowledge, we can now assess the \n", "differential capacitance of the system, by changing the applied voltage\n", "difference and determining the corresponding surface charge density.\n", - "We will use the ZnDraw visualizer to visualize our system:" + "We will use ZnDraw to visualize our system:" ] }, { @@ -1033,8 +1033,9 @@ "vis = espressomd.zn.Visualizer(system, colors=color, radii=radii)\n", "#vis.draw_constraints([floor, ceiling])\n", "\n", - "#note: The system displayed is the enlarged system as you can see the ELG-GAP along the nonperiodic direction. The particles are only in the smaller subsystem.\n", - "#note: The particles are shown bigger for visualization purpose." + "# note: you may need to zoom out since the ELC gap region takes a significant portion of\n", + "# the box along the non-periodic direction. The particles are only in the smaller subsystem.\n", + "# note: The particles are shown bigger for visualization purpose." ] }, { diff --git a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb index 2bd4d2ad56..72d116a517 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part1.ipynb @@ -15,15 +15,13 @@ "source": [ "## Table of Contents\n", "1. [Introduction](#Introduction)\n", - "2. [The Model](#The-Model)\n", + "2. 
[The model](#The-model)\n", "3. [Structure of this tutorial](#Structure-of-this-tutorial)\n", - "4. [Compiling ESPResSo for this Tutorial](#Compiling-ESPResSo-for-this-Tutorial)\n", - "5. [A Monolayer-Ferrofluid System in ESPResSo](#A-Monolayer-Ferrofluid-System-in-ESPResSo)\n", + "4. [Compiling ESPResSo for this tutorial](#Compiling-ESPResSo-for-this-tutorial)\n", + "5. [A monolayer-ferrofluid system in ESPResSo](#A-monolayer-ferrofluid-system-in-ESPResSo)\n", " 1. [Setup](#Setup)\n", - " 2. [Sampling](#Sampling)\n", - " 3. [Sampling with animation](#Sampling-with-animation)\n", - " 4. [Sampling without animation](#Sampling-without-animation)\n", - " 5. [Cluster distribution](#Cluster-distribution)" + " 2. [Sampling and cluster analysis](#Sampling-and-cluster-analysis)\n", + " 3. [Cluster distribution](#Cluster-distribution)" ] }, { @@ -54,10 +52,8 @@ "metadata": {}, "source": [ "
\n", - "\n", - "
\n", - "
Figure 1: Schematic representation of electrostatically stabilization (picture top) and steric stabilization (picture bottom) [3]
\n", - "
\n", + "\n", + "
Figure 1: Schematic representation of electrostatic stabilization (picture top) and steric stabilization (picture bottom) [3]
\n", "
" ] }, @@ -67,10 +63,8 @@ "metadata": {}, "source": [ "
\n", - "ferrofluid on glass plate under which a strong magnet is placed\n", - "
\n", - "
Figure 2: Real Ferrofluid exposed to an external magnetic field (neodymium magnet) [4]
\n", - "
\n", + "ferrofluid on glass plate under which a strong magnet is placed\n", + "
Figure 2: Real ferrofluid exposed to an external magnetic field (neodymium magnet) [4]
\n", "
" ] }, @@ -79,7 +73,7 @@ "id": "095339c1", "metadata": {}, "source": [ - "## The Model" + "## The model" ] }, { @@ -124,7 +118,7 @@ " \\lambda = \\frac{\\tilde{u}_{\\text{DD}}}{u_T} = \\gamma \\frac{\\mu^2}{k_{\\text{B}}T\\sigma^3}\n", "\\end{equation}\n", "\n", - "where $u_\\mathrm{T} = k_{\\text{B}}T$ is the thermal energy and $\\tilde{u}_{DD}$ is the absolute value of the dipole-dipole interaction energy at close contact (cc) and head-to-tail configuration (htt) (see figure 4) per particle, i.e. in formulas it reads\n", + "where $u_\\mathrm{T} = k_{\\text{B}}T$ is the thermal energy and $\\tilde{u}_{DD}$ is the absolute value of the dipole-dipole interaction energy at close contact (cc) and head-to-tail configuration (htt) (see figure 3) per particle, i.e. in formulas it reads\n", "\n", "\\begin{equation}\n", " \\tilde{u}_{\\text{DD}} = \\frac{ \\left| u_{\\text{DD}}^{\\text{htt, cc}} \\right| }{2}\n", @@ -143,11 +137,9 @@ "id": "62b95d9c", "metadata": {}, "source": [ - "
\n", - "schematic representation of head to tail configuration\n", - "
\n", - "
Figure 4: Schematic representation of the head-to-tail configuration of two magnetic particles at close contact.
\n", - "
\n", + "
\n", + "schematic representation of head to tail configuration\n", + "
Figure 3: Schematic representation of the head-to-tail configuration of two magnetic particles at close contact.
\n", "
" ] }, @@ -182,7 +174,7 @@ "id": "b6d5a65e", "metadata": {}, "source": [ - "## Compiling ESPResSo for this Tutorial" + "## Compiling ESPResSo for this tutorial" ] }, { @@ -218,7 +210,7 @@ "id": "a0bffbeb", "metadata": {}, "source": [ - "## A Monolayer-Ferrofluid System in ESPResSo" + "## A monolayer-ferrofluid system in ESPResSo" ] }, { @@ -324,7 +316,8 @@ "id": "136aa645", "metadata": {}, "source": [ - "## Exercise:\n", + "**Exercise:**\n", + "\n", "How large does `BOX_SIZE` have to be for a system of `N_PART` particles with a volume (area) fraction `PHI`?\n", "Define `BOX_SIZE`." ] @@ -384,7 +377,8 @@ "outputs": [], "source": [ "# Lennard-Jones interaction\n", - "system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA, cutoff=LJ_CUT, shift=\"auto\")" + "system.non_bonded_inter[0, 0].lennard_jones.set_params(\n", + " epsilon=LJ_EPSILON, sigma=LJ_SIGMA, cutoff=LJ_CUT, shift=\"auto\")" ] }, { @@ -394,7 +388,7 @@ "source": [ "Now we generate random positions and orientations of the particles and their dipole moments. \n", "\n", - "**Hint:**\n", + "Hint:\n", "It should be noted that we seed the random number generator of numpy. Thus the initial configuration of our system is the same every time this script will be executed. You can change it to another one to simulate with a different initial configuration." ] }, @@ -403,9 +397,11 @@ "id": "c49729f7", "metadata": {}, "source": [ - "## Exercise:\n", + "**Exercise:**\n", + "\n", "How does one set up randomly oriented dipole moments?\n", - "*Hint*: Think of the way that different methods could introduce a bias in the distribution of the orientations.\n", + "\n", + "Hint: Think of the way that different methods could introduce a bias in the distribution of the orientations.\n", "\n", "Create a variable `dip` as a `N_PART x 3` numpy array, which contains the randomly distributed dipole moments." ] @@ -523,7 +519,7 @@ "For the simulation of our system we choose the velocity Verlet integrator.\n", "After that we set up the thermostat which is, in our case, a Langevin thermostat to simulate in an NVT ensemble.\n", "\n", - "**Hint:**\n", + "Hint:\n", "It should be noted that we seed the Langevin thermostat, thus the time evolution of the system is partly predefined.\n", "Partly because of the numeric accuracy and the automatic tuning algorithms of Dipolar P3M and DLC where the resulting parameters are slightly different every time.\n", "You can change the seed to get a guaranteed different time evolution." @@ -562,7 +558,7 @@ "execution_count": null, "id": "fd12b1f6", "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [], "source": [ @@ -592,7 +588,7 @@ "execution_count": null, "id": "95c4a8b6", "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [], "source": [ @@ -608,7 +604,7 @@ "id": "a6c4c46b", "metadata": {}, "source": [ - "## Sampling and cluster analysis" + "### Sampling and cluster analysis" ] }, { @@ -617,7 +613,7 @@ "metadata": {}, "source": [ "To quantify the number of clusters and their respective sizes, we now want to perform a cluster analysis.\n", - "For that we can use ESPREsSo's [cluster analysis class](https://espressomd.github.io/doc/analysis.html?highlight=cluster%20analysis#cluster-analysis).\n", + "For that we can use ESPResSo's [cluster analysis class](https://espressomd.github.io/doc/analysis.html#cluster-analysis).\n", "The system will be sampled over 100 loops." 
] }, @@ -636,7 +632,7 @@ "id": "2f6d201a", "metadata": {}, "source": [ - "## Exercise:\n", + "**Exercise:**\n", "\n", "Setup a cluster analysis object (`ClusterStructure` class) and assign its instance to the variable `cluster_structure`.\n", "As criterion for the cluster analysis use a distance criterion where particles are assumed to be\n", @@ -717,7 +713,7 @@ "id": "98b99dd6", "metadata": {}, "source": [ - "## Exercise:\n", + "**Exercise:**\n", "\n", "Write an integration loop which runs a cluster analysis on the system, saving the number of clusters `n_clusters` and the size distribution `cluster_sizes`.\n", "Take the following as a starting point:\n", @@ -803,7 +799,7 @@ "id": "4c0828ea", "metadata": {}, "source": [ - "## Cluster distribution" + "### Cluster distribution" ] }, { @@ -819,12 +815,12 @@ "id": "1c6be57c", "metadata": {}, "source": [ - "## Exercise:\n", + "**Exercise:**\n", "\n", "Use `numpy` to calculate a histogram of the cluster sizes and assign it to the variable `size_dist`.\n", "Take only clusters up to a size of 19 particles into account.\n", "\n", - "*Hint*: In order not to count clusters with size 20 or more, one may include an additional bin containing these.\n", + "Hint: In order not to count clusters with size 20 or more, one may include an additional bin containing these.\n", "The reason for that is that `numpy` defines the histogram bins as half-open intervals with the open border at the upper bin edge.\n", "Consequently clusters of larger sizes are attributed to the last bin.\n", "By not using the last bin in the plot below, these clusters can effectively be neglected." diff --git a/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb index e13eb759f9..80dcc881bb 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part2.ipynb @@ -167,7 +167,8 @@ "pos = box_size * np.hstack((np.random.random((N_PART, 2)), np.zeros((N_PART, 1))))\n", "\n", "# Add particles\n", - "particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)], dip=dip, fix=N_PART * [(False, False, True)])\n", + "particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)],\n", + " dip=dip, fix=N_PART * [(False, False, True)])\n", "\n", "# Remove overlap between particles by means of the steepest descent method\n", "MASS = 1.0\n", @@ -270,7 +271,7 @@ "execution_count": null, "id": "7750295c", "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [], "source": [ @@ -471,7 +472,8 @@ "pos = box_size * np.hstack((np.random.random((N_PART, 2)), np.zeros((N_PART, 1))))\n", "\n", "# Add particles\n", - "particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)], dip=dip, fix=N_PART * [(False, False, True)])\n", + "particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)],\n", + " dip=dip, fix=N_PART * [(False, False, True)])\n", "\n", "# Remove overlap between particles by means of the steepest descent method\n", "system.integrator.set_steepest_descent(f_max=0, gamma=0.1, max_displacement=0.05)\n", @@ -803,7 +805,7 @@ "outputs": [], "source": [ "def dL_dB(alpha):\n", - " return (1. / (alpha**2.) - 1. / ((np.sinh(alpha))**2.)) * dipm / (KT)" + " return (1. / (alpha**2) - 1. 
/ np.sinh(alpha)**2) * dipm / (KT)" ] }, { @@ -884,8 +886,8 @@ "plt.xlabel(r'$\\alpha$', fontsize=20)\n", "plt.ylabel(r'$M^*$', fontsize=20)\n", "plt.plot(x, L(x), label='Langevin function')\n", - "plt.plot(x, magnetization_approx_perp(x), label='q2D approximation $\\perp$')\n", - "plt.plot(x, magnetization_approx_para(x), label='q2D approximation $\\parallel$')\n", + "plt.plot(x, magnetization_approx_perp(x), label=r'q2D approximation $\\perp$')\n", + "plt.plot(x, magnetization_approx_para(x), label=r'q2D approximation $\\parallel$')\n", "# < exercise >\n", "plt.legend(fontsize=20)\n", "plt.show()\n", @@ -907,10 +909,10 @@ "plt.xlabel(r'$\\alpha$', fontsize=20)\n", "plt.ylabel(r'$M^*$', fontsize=20)\n", "plt.plot(x, L(x), label='Langevin function')\n", - "plt.plot(x, magnetization_approx_perp(x), label='q2D approximation $\\perp$')\n", - "plt.plot(x, magnetization_approx_para(x), label='q2D approximation $\\parallel$')\n", - "plt.plot(alphas, magnetization_perp / N_PART, 'o', label='simulation results $\\perp$')\n", - "plt.plot(alphas, magnetization_para / N_PART, 'o', label='simulation results $\\parallel$')\n", + "plt.plot(x, magnetization_approx_perp(x), label=r'q2D approximation $\\perp$')\n", + "plt.plot(x, magnetization_approx_para(x), label=r'q2D approximation $\\parallel$')\n", + "plt.plot(alphas, magnetization_perp / N_PART, 'o', label=r'simulation results $\\perp$')\n", + "plt.plot(alphas, magnetization_para / N_PART, 'o', label=r'simulation results $\\parallel$')\n", "plt.legend(fontsize=20)\n", "plt.show()" ] diff --git a/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb b/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb index 8bce1733db..b3be7c3c32 100644 --- a/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb +++ b/doc/tutorials/ferrofluid/ferrofluid_part3.ipynb @@ -273,7 +273,7 @@ "execution_count": null, "id": "89966f9b", "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [], "source": [ @@ -334,11 +334,11 @@ "execution_count": null, "id": "bd24f8eb", "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [], "source": [ - "# initialize array for hold the sampled dipole moments\n", + "# initialize array that will hold the sampled dipole moments\n", "dipms = np.full((loops, 3), np.nan)\n", "\n", "# sample dipole moment\n", @@ -542,7 +542,7 @@ "id": "395a4fc0-baaf-48d6-8b88-b8ba42408bfe", "metadata": {}, "source": [ - "Additionally, we will visualize the sampling of the magnetization curve using the ZnDraw visualizer. Note, that the magnetic field is increasing during the video." + "Additionally, we will visualize the sampling of the magnetization curve using ZnDraw. Note, that the magnetic field is increasing during the video." 
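A minimal sketch of what such a visualization loop could look like, assuming the tutorial's `system`, `KT` and `dipm` variables and a list `alphas` of Langevin parameters; the particle type, color, field direction and integration lengths are placeholder choices, and the field magnitude follows from the Langevin parameter via $H = \alpha k_\mathrm{B}T / \mu$:

```python
import espressomd.zn
import espressomd.constraints

# ZnDraw visualizer (placeholder color and radius for particle type 0)
vis = espressomd.zn.Visualizer(system, colors={0: "#7fc454"}, radii={0: 1}, folded=True)

field_constraint = None
for alpha in alphas:
    # field strength corresponding to the Langevin parameter alpha = dipm * H / KT
    H = alpha * KT / dipm
    # replace the previous field constraint by one with the new magnitude
    if field_constraint is not None:
        system.constraints.remove(field_constraint)
    field_constraint = system.constraints.add(
        espressomd.constraints.HomogeneousMagneticField(H=[H, 0., 0.]))
    for _ in range(10):
        system.integrator.run(100)
        vis.update()
```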
] }, { diff --git a/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb b/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb index 5f6471c40b..6c602341e1 100644 --- a/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb +++ b/doc/tutorials/langevin_dynamics/langevin_dynamics.ipynb @@ -240,7 +240,7 @@ "id": "933cea77-4bca-47a6-8a82-cc13d07ec673", "metadata": {}, "source": [ - "Now we can do the actual simulation where we simulate for different friction coefficients and sample our observables:" + "Now we can do the actual production run where we simulate for different friction coefficients and sample our observables:" ] }, { diff --git a/doc/tutorials/widom_insertion/widom_insertion.ipynb b/doc/tutorials/widom_insertion/widom_insertion.ipynb index 04e9140afd..cf06a7c507 100644 --- a/doc/tutorials/widom_insertion/widom_insertion.ipynb +++ b/doc/tutorials/widom_insertion/widom_insertion.ipynb @@ -195,6 +195,7 @@ "Now we are ready to set up the system. Because we will need to rescale the simulation box, we will initially only set up the short-range interactions.\n", "\n", "**Exercise:**\n", + "\n", "* Set up a system with box length $10\\,\\sigma$, integrator time step $\\Delta t=0.01 \\,\\tau$ ($\\tau$ is the Lennard-Jones timescale, i.e. equal to 1 in the employed unit system) and Verlet skin width of $\\delta = 0.4\\sigma$\n", "* Add a total of ``N_ION_PAIRS`` ion pairs at random positions to the simulation box\n", "* Add a WCA interaction (purely repulsive Lennard-Jones interaction) between the particles\n", @@ -244,10 +245,12 @@ "metadata": {}, "source": [ "**Exercise:**\n", + "\n", "* Implement a function ``system_setup(c_salt_SI)`` that takes the desired salt concentration ``c_salt_SI`` in SI-units (mol/l) as an argument and rescales the simulation box accordingly\n", "* Afterwards, the function should also add the P3M actor to account for electrostatic interactions\n", "\n", "**Hints:**\n", + "\n", "* Use the prefactor ``PREF`` defined above to convert the concentration from SI-units to the unit system employed in the simulation\n", "* An accuracy of $10^{-3}$ for the P$^3$M should be enough for this exercise" ] @@ -290,9 +293,11 @@ "metadata": {}, "source": [ "**Exercise:**\n", + "\n", "* Implement a function ``warmup()`` that removes potential overlaps between particles with 10000 steps of the steepest descent integrator and performs a warmup integration with 100 loops of 100 simulation steps\n", "\n", "**Hints:**\n", + "\n", "* keep in mind that after the overlaps have been removed, the integrator should be switched\n", " back to Velocity-Verlet with the Langevin thermostat (using ``GAMMA`` and ``KT``)\n", " in order to perform a warmup integration\n", @@ -340,10 +345,11 @@ "metadata": {}, "source": [ "**Exercise:**\n", - "* Add the functionality to perform Widom insertion moves. Make sure that you always insert an ion pair in order to conserve the system electroneutrality.\n", "\n", + "* Add the functionality to perform Widom insertion moves. 
Make sure that you always insert an ion pair in order to conserve the system electroneutrality.\n", "\n", "**Hints:**\n", + "\n", "* Refer to the documentation to find out how to set up Widom particle insertion ([Widom Insertion](https://espressomd.github.io/doc/reaction_methods.html#widom-insertion-for-homogeneous-systems))" ] }, @@ -377,6 +383,7 @@ "metadata": {}, "source": [ "**Exercise:**\n", + "\n", "* Implement a function ``calculate_excess_chemical_potential(n_md_steps, n_mc_steps, sample_size)``\n", " that performs ``n_mc_steps`` Widom particle insertions every ``n_md_steps`` MD steps in a loop\n", " that repeats ``sample_size`` times and returns the chemical potential and its error as a tuple via\n", @@ -471,6 +478,7 @@ "a logarithmically scaled x-axis.\n", "\n", "**Exercise:**\n", + "\n", "* Explain the observed behaviour qualitatively\n", "* How do you expect the excess chemical potential to behave in the limit $c_{\\mathrm{salt}}\\rightarrow 0$ mol/l?" ] @@ -524,9 +532,11 @@ "$B \\simeq 1.0$ and $C = 0.2$ in reduced units for NaCl in an aqueous solution at 25°C.\n", "\n", "**Exercise:**\n", + "\n", "* Compare your simulation results with the Debye-Hückel theory and the Davies equation by plotting all three curves in a single plot\n", "\n", "**Hint:**\n", + "\n", "* re-use the code from the previous figure\n", "* create a logarithmic concentration range from $10^{-4}$ to $10^{0}$ mol/l with ``np.logspace(-4, 0.0, num=500, base=10)`` to plot the analytical solutions" ]
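As a sketch of how the final comparison plot could be assembled: the helper below uses the generic Davies form $\ln \gamma_{\pm} = -A\sqrt{c}/(1 + B\sqrt{c}) + Cc$ with the $B$ and $C$ values quoted above; the prefactor `A`, the direction of the `PREF` unit conversion, and the factor of 2 for an ion pair are assumptions that must be checked against the tutorial's definitions:

```python
import numpy as np
import matplotlib.pyplot as plt

A = 1.17  # assumed Debye-Hueckel prefactor in reduced units; use the value defined in the tutorial
B = 1.0
C = 0.2

def beta_mu_ex_davies(c_reduced):
    # excess chemical potential per ion pair (hence the factor 2) from the Davies equation,
    # with the salt concentration given in reduced units
    ln_gamma = -A * np.sqrt(c_reduced) / (1.0 + B * np.sqrt(c_reduced)) + C * c_reduced
    return 2.0 * ln_gamma

c_SI = np.logspace(-4, 0.0, num=500, base=10)  # concentration range in mol/l
c_reduced = PREF * c_SI  # assumed conversion using the tutorial's PREF prefactor

plt.xscale("log")
plt.xlabel(r"$c_{\mathrm{salt}}$ [mol/l]")
plt.ylabel(r"$\beta \mu_{\mathrm{ex}}$")
plt.plot(c_SI, beta_mu_ex_davies(c_reduced), label="Davies equation")
# add the Debye-Hueckel curve and the Widom insertion data points analogously
plt.legend()
plt.show()
```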