diff --git a/.github/workflows/cffconvert.yml b/.github/workflows/cffconvert.yml index 807c7e63..47c15ee8 100644 --- a/.github/workflows/cffconvert.yml +++ b/.github/workflows/cffconvert.yml @@ -12,7 +12,7 @@ jobs: timeout-minutes: 2 steps: - name: Check out a copy of the repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Check whether the citation metadata from CITATION.cff is valid uses: citation-file-format/cffconvert-github-action@2.0.0 diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b2229ac6..b8e01bad 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -7,10 +7,10 @@ jobs: timeout-minutes: 15 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python 3.8 - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: 3.8 diff --git a/.github/workflows/ci_macos11_clang.yaml b/.github/workflows/ci_macos11_clang.yaml index ac139b58..e1168c93 100644 --- a/.github/workflows/ci_macos11_clang.yaml +++ b/.github/workflows/ci_macos11_clang.yaml @@ -16,7 +16,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Install dependencies run: | diff --git a/.github/workflows/ci_macos11_gcc_gfortran.yaml b/.github/workflows/ci_macos11_gcc_gfortran.yaml index f83e7242..6dd73eea 100644 --- a/.github/workflows/ci_macos11_gcc_gfortran.yaml +++ b/.github/workflows/ci_macos11_gcc_gfortran.yaml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Install dependencies run: | diff --git a/.github/workflows/ci_macos12_clang.yaml b/.github/workflows/ci_macos12_clang.yaml index 603aa7bc..db96b555 100644 --- a/.github/workflows/ci_macos12_clang.yaml +++ b/.github/workflows/ci_macos12_clang.yaml @@ -16,7 +16,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Install dependencies run: | diff --git a/.github/workflows/ci_macos12_gcc_gfortran.yaml b/.github/workflows/ci_macos12_gcc_gfortran.yaml index a754fea2..92e2146a 100644 --- a/.github/workflows/ci_macos12_gcc_gfortran.yaml +++ b/.github/workflows/ci_macos12_gcc_gfortran.yaml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Install dependencies run: | diff --git a/.github/workflows/ci_macos13_clang.yaml b/.github/workflows/ci_macos13_clang.yaml index 00f1abc8..8fbe27f1 100644 --- a/.github/workflows/ci_macos13_clang.yaml +++ b/.github/workflows/ci_macos13_clang.yaml @@ -16,7 +16,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Install dependencies run: | diff --git a/.github/workflows/ci_macos13_gcc_gfortran.yaml b/.github/workflows/ci_macos13_gcc_gfortran.yaml index 36302282..bcbf7c9a 100644 --- a/.github/workflows/ci_macos13_gcc_gfortran.yaml +++ b/.github/workflows/ci_macos13_gcc_gfortran.yaml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Install dependencies run: | diff --git a/.github/workflows/ci_python_compatibility.yaml b/.github/workflows/ci_python_compatibility.yaml index 21013188..d21765ae 100644 --- a/.github/workflows/ci_python_compatibility.yaml +++ b/.github/workflows/ci_python_compatibility.yaml @@ -7,19 +7,19 @@ jobs: timeout-minutes: 5 strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] 
steps: - name: Check out the source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Cache Tox - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: ${{ github.workspace }}/.tox key: python-compatibility-${{ matrix.python-version }}-tox diff --git a/.github/workflows/ci_python_macos.yaml b/.github/workflows/ci_python_macos.yaml index def3ff7c..9107472c 100644 --- a/.github/workflows/ci_python_macos.yaml +++ b/.github/workflows/ci_python_macos.yaml @@ -7,19 +7,19 @@ jobs: timeout-minutes: 10 strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] steps: - name: Check out the source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Cache Tox - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: ${{ github.workspace }}/.tox key: python-macos-${{ matrix.python-version }}-tox diff --git a/.github/workflows/ci_ubuntu20.04.yaml b/.github/workflows/ci_ubuntu20.04.yaml index 03006993..2c89d255 100644 --- a/.github/workflows/ci_ubuntu20.04.yaml +++ b/.github/workflows/ci_ubuntu20.04.yaml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 10 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Run tests on Ubuntu 20.04 run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:20.04 /bin/bash -c 'apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential cmake gfortran git valgrind libopenmpi-dev pkg-config python3 python3-pip python3-venv curl && apt-get -y remove libssl-dev zlib1g-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "pip3 install --user -U \"pip<22\" setuptools wheel" && su muscle3 -c -- "pip3 install --user \"ymmsl>=0.13.0,<0.14\" qcg-pilotjob==0.13.1" && su muscle3 -c -- "cd /home/muscle3/muscle3 && make test_examples"' diff --git a/.github/workflows/ci_ubuntu20.04_clang.yaml b/.github/workflows/ci_ubuntu20.04_clang.yaml index 1050b45b..f6fde0ad 100644 --- a/.github/workflows/ci_ubuntu20.04_clang.yaml +++ b/.github/workflows/ci_ubuntu20.04_clang.yaml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 10 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Run tests on Ubuntu 20.04 with Clang run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:20.04 /bin/bash -c 'apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential clang cmake gfortran git valgrind libopenmpi-dev pkg-config python3 python3-pip python3-venv curl && apt-get -y remove libssl-dev zlib1g-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "pip3 install --user -U \"pip<22\" setuptools wheel" && su muscle3 -c -- "pip3 install --user \"ymmsl>=0.13.0,<0.14\" qcg-pilotjob==0.13.1" && su muscle3 -c -- "cd /home/muscle3/muscle3 && CXXFLAGS=-fPIE OMPI_CXX=clang++ CXX=clang++ make test_examples"' diff --git a/.github/workflows/ci_ubuntu20.04_intel.yaml 
b/.github/workflows/ci_ubuntu20.04_intel.yaml index 96f24e73..8243cbbd 100644 --- a/.github/workflows/ci_ubuntu20.04_intel.yaml +++ b/.github/workflows/ci_ubuntu20.04_intel.yaml @@ -17,7 +17,7 @@ jobs: timeout-minutes: 15 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Run tests on Ubuntu 20.04 with Clang - run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:20.04 /bin/bash -c 'apt-get update && apt-get -y install wget && wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && mv GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB /etc/apt/trusted.gpg.d/intel-sw-products.asc && echo "deb https://apt.repos.intel.com/oneapi all main" >/etc/apt/sources.list.d/oneAPI.list && apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential cmake git valgrind pkg-config python3 python3-pip python3-venv curl intel-oneapi-compiler-dpcpp-cpp-2021.1.1 intel-oneapi-compiler-fortran-2021.1.1 intel-oneapi-mpi-devel-2021.1.1 && apt-get -y remove libssl-dev zlib1g-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "pip3 install --user -U \"pip<22\" setuptools wheel" && su muscle3 -c -- "pip3 install --user \"ymmsl>=0.13.0,<0.14\" qcg-pilotjob==0.13.1" && su muscle3 -s /bin/bash -c -- "cd /home/muscle3/muscle3 && . /opt/intel/oneapi/setvars.sh && CXX=icpx MPICXX=\"mpiicpc -cxx=icpx\" FC=ifx MPIFC=\"mpiifort -fc=ifx\" make test_examples"' + run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:20.04 /bin/bash -c 'apt-get update && apt-get -y install wget && wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && mv GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB /etc/apt/trusted.gpg.d/intel-sw-products.asc && echo "deb https://apt.repos.intel.com/oneapi all main" >/etc/apt/sources.list.d/oneAPI.list && apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential cmake git valgrind pkg-config python3 python3-pip python3-venv curl intel-oneapi-compiler-dpcpp-cpp-2021.2.0 intel-oneapi-compiler-fortran-2021.2.0 intel-oneapi-mpi-devel-2021.2.0 && apt-get -y remove libssl-dev zlib1g-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "pip3 install --user -U \"pip<22\" setuptools wheel" && su muscle3 -c -- "pip3 install --user \"ymmsl>=0.13.0,<0.14\" qcg-pilotjob==0.13.1" && su muscle3 -s /bin/bash -c -- "cd /home/muscle3/muscle3 && . 
/opt/intel/oneapi/setvars.sh && CXX=icpx MPICXX=\"mpiicpc -cxx=icpx\" FC=ifx MPIFC=\"mpiifort -fc=ifx\" make test_examples"' diff --git a/.github/workflows/ci_ubuntu22.04.yaml b/.github/workflows/ci_ubuntu22.04.yaml index 639a8b96..7f65dc8e 100644 --- a/.github/workflows/ci_ubuntu22.04.yaml +++ b/.github/workflows/ci_ubuntu22.04.yaml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 10 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Run tests on Ubuntu 22.04 run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:22.04 /bin/bash -c 'apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential cmake gfortran git valgrind libopenmpi-dev pkg-config python3 python3-pip python3-venv curl && apt-get -y remove libssl-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "pip3 install -U pip setuptools wheel" && su muscle3 -c -- "pip3 install \"ymmsl>=0.13.0,<0.14\" qcg-pilotjob==0.13.1" && su muscle3 -c -- "cd /home/muscle3/muscle3 && make test_examples"' diff --git a/.github/workflows/ci_ubuntu22.04_clang.yaml b/.github/workflows/ci_ubuntu22.04_clang.yaml index c9f40582..343af0a6 100644 --- a/.github/workflows/ci_ubuntu22.04_clang.yaml +++ b/.github/workflows/ci_ubuntu22.04_clang.yaml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 10 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Run tests on Ubuntu 22.04 with Clang run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:22.04 /bin/bash -c 'apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential clang cmake gfortran git valgrind libopenmpi-dev pkg-config python3 python3-pip python3-venv curl && apt-get -y remove libssl-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "pip3 install -U pip setuptools wheel" && su muscle3 -c -- "pip3 install \"ymmsl>=0.13.0,<0.14\" qcg-pilotjob==0.13.1" && su muscle3 -c -- "cd /home/muscle3/muscle3 && CXXFLAGS=-fPIE OMPI_CXX=clang++ CXX=clang++ make test_examples"' diff --git a/.github/workflows/ci_ubuntu22.04_intel.yaml b/.github/workflows/ci_ubuntu22.04_intel.yaml index fecdf10f..2af4a7db 100644 --- a/.github/workflows/ci_ubuntu22.04_intel.yaml +++ b/.github/workflows/ci_ubuntu22.04_intel.yaml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 10 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Run tests on Ubuntu 22.04 with the Intel compiler run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:22.04 /bin/bash -c 'apt-get update && apt-get -y install wget && wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && mv GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB /etc/apt/trusted.gpg.d/intel-sw-products.asc && echo "deb https://apt.repos.intel.com/oneapi all main" >/etc/apt/sources.list.d/oneAPI.list && apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential cmake git valgrind pkg-config python3 python3-pip python3-venv curl intel-oneapi-compiler-dpcpp-cpp intel-oneapi-compiler-fortran intel-oneapi-mpi-devel && apt-get -y remove libssl-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su 
muscle3 -c -- "pip3 install -U pip setuptools wheel" && su muscle3 -c -- "pip3 install \"ymmsl>=0.13.0,<0.14\" qcg-pilotjob==0.13.1" && su muscle3 -c -- "cd /home/muscle3/muscle3 && . /opt/intel/oneapi/setvars.sh && MPICXX=\"mpiicpc -cxx=icpx\" CXX=icpx MPIFC=\"mpiifort -fc=ifx\" FC=ifx make test_examples"' diff --git a/.github/workflows/ci_ubuntu23.04.yaml b/.github/workflows/ci_ubuntu23.04.yaml new file mode 100644 index 00000000..5113acf9 --- /dev/null +++ b/.github/workflows/ci_ubuntu23.04.yaml @@ -0,0 +1,20 @@ +# Run Continuous Integration for the latest Ubuntu release +# This mainly checks for issues/regressions in the native build +name: native_compatibility_ubuntu23.04 +on: + schedule: + - cron: '30 7 * * 6' + push: + branches: + - 'release-*' + - fix_native_compatibility_ci +jobs: + build: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Run tests on Ubuntu 23.04 + run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:23.04 /bin/bash -c 'apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential cmake gfortran git valgrind libopenmpi-dev pkg-config python3 python3-pip python3-venv curl && apt-get -y remove libssl-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "cd /home/muscle3/muscle3 && make test_examples"' diff --git a/.github/workflows/ci_ubuntu23.04_clang.yaml b/.github/workflows/ci_ubuntu23.04_clang.yaml new file mode 100644 index 00000000..922edf5d --- /dev/null +++ b/.github/workflows/ci_ubuntu23.04_clang.yaml @@ -0,0 +1,20 @@ +# Run Continuous Integration for the latest Ubuntu release +# This mainly checks for issues/regressions in the native build +name: native_compatibility_ubuntu23.04_clang +on: + schedule: + - cron: '30 6 * * 6' + push: + branches: + - 'release-*' + - fix_native_compatibility_ci +jobs: + build: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Run tests on Ubuntu 23.04 with Clang + run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env DEBIAN_FRONTEND=noninteractive ubuntu:23.04 /bin/bash -c 'apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential clang cmake gfortran git valgrind libopenmpi-dev pkg-config python3 python3-pip python3-venv curl && apt-get -y remove libssl-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "cd /home/muscle3/muscle3 && CXXFLAGS=-fPIE OMPI_CXX=clang++ CXX=clang++ make test_examples"' diff --git a/.github/workflows/ci_ubuntu23.04_intel.yaml b/.github/workflows/ci_ubuntu23.04_intel.yaml new file mode 100644 index 00000000..8b6051c9 --- /dev/null +++ b/.github/workflows/ci_ubuntu23.04_intel.yaml @@ -0,0 +1,21 @@ +# Run Continuous Integration for the latest Ubuntu release +# This mainly checks for issues/regressions in the native build +name: native_compatibility_ubuntu23.04_intel +on: + schedule: + - cron: '0 7 * * 6' + push: + branches: + - 'release-*' + - fix_native_compatibility_ci + - issue-25-intel-compiler-support +jobs: + build: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Run tests on Ubuntu 23.04 with the Intel compiler + run: docker run -v "${GITHUB_WORKSPACE}:/workspace" --env LC_ALL=C.UTF-8 --env LANG=C.UTF-8 --env 
DEBIAN_FRONTEND=noninteractive ubuntu:23.04 /bin/bash -c 'apt-get update && apt-get -y install wget && wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && mv GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB /etc/apt/trusted.gpg.d/intel-sw-products.asc && echo "deb https://apt.repos.intel.com/oneapi all main" >/etc/apt/sources.list.d/oneAPI.list && apt-get update && apt-get -y dist-upgrade && apt-get -y install build-essential cmake git valgrind pkg-config python3 python3-pip python3-venv curl intel-oneapi-compiler-dpcpp-cpp intel-oneapi-compiler-fortran intel-oneapi-mpi-devel && apt-get -y remove libssl-dev && useradd -m -d /home/muscle3 muscle3 && su muscle3 -c -- "cp -r --preserve=mode /workspace /home/muscle3/muscle3" && su muscle3 -c -- "cd /home/muscle3/muscle3 && . /opt/intel/oneapi/setvars.sh && MPICXX=\"mpiicpc -cxx=icpx\" CXX=icpx MPIFC=\"mpiifort -fc=ifx\" FC=ifx make test_examples"' diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cc2b8393..a00c6907 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,34 @@ Change Log All notable changes to this project will be documented in this file. This project adheres to `Semantic Versioning `_. +0.7.1 +***** + +Added +----- + +* Support for Python 3.11 (working already, now official) +* Enabled type checking support for the libmuscle Python API + +Improved +-------- + +* Easier crash debugging due to improved root cause detection +* Fixed crash in profiling timeline plot +* Better performance of timeline plot +* Better visual quality of timeline plot +* Improved profiling of shutdown process +* Fixed crash in profiler for large simulations +* Fixed several (harmless) compiler warnings +* Small documentation rendering improvements + + +Thanks +------ + +* David for reporting many of these and submitting a fix too! + + 0.7.0 ***** diff --git a/CITATION.cff b/CITATION.cff index 1eb23089..3b3ef8f7 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -12,6 +12,9 @@ authors: family-names: Veen given-names: Lourens orcid: "https://orcid.org/0000-0002-6311-1168" + - + family-names: Sebregts + given-names: Maarten keywords: - multiscale diff --git a/MANIFEST.in b/MANIFEST.in index d52a8f9e..27a384aa 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,3 +2,5 @@ include LICENSE include README.rst include NOTICE include VERSION +include muscle3/py.typed +include libmuscle/python/libmuscle/py.typed diff --git a/README.rst b/README.rst index 5b492f01..2b0f3e2c 100644 --- a/README.rst +++ b/README.rst @@ -1,8 +1,8 @@ .. image:: https://github.com/multiscale/muscle3/raw/develop/docs/source/muscle3_logo_readme.png :alt: MUSCLE3 -.. image:: https://readthedocs.org/projects/muscle3/badge/?version=latest - :target: https://muscle3.readthedocs.io/en/develop/?badge=latest +.. image:: https://readthedocs.org/projects/muscle3/badge/?version=master + :target: https://muscle3.readthedocs.io/en/develop/?badge=master :alt: Documentation Build Status .. image:: https://github.com/multiscale/muscle3/workflows/continuous_integration/badge.svg?branch=master @@ -18,12 +18,22 @@ | MUSCLE3 is the third incarnation of the MUSCLE Multiscale Coupling Library and -Environment. It is developed by the e-MUSC project of the University of -Amsterdam and the Netherlands eScience Center. +Environment. With MUSCLE3, you can connect multiple simulation models together into +a multiscale simulation. Browse to `the MUSCLE3 documentation`_ to get started. +Collaboration +============= + +For academic collaboration, please contact `prof. 
Alfons Hoekstra (UvA CSL) `_ +and/or `Lourens Veen (NLeSC) `_. + +Commercial support for MUSCLE3 is provided by +`Ignition Computing `_. + + Legal ===== diff --git a/VERSION b/VERSION index faef31a4..39e898a4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.7.0 +0.7.1 diff --git a/docs/source/conf.py b/docs/source/conf.py index 36445d26..cd2c6483 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -136,6 +136,13 @@ def patch_installation_version(): import subprocess subprocess.call('cd ../.. ; doxygen', shell=True) +# -- Remove impl namespaces for exported symbols -- +for p in pathlib.Path('..', 'doxygen', 'xml').iterdir(): + contents = p.read_text() + contents = contents.replace('::_MUSCLE_IMPL_NS', '') + contents = contents.replace('::impl', '') + p.write_text(contents) + # -- Run apidoc plug-in manually, as readthedocs doesn't support it ------- # See https://github.com/rtfd/readthedocs.org/issues/1139 def run_apidoc(_): diff --git a/docs/source/contributor_logos.png b/docs/source/contributor_logos.png new file mode 100644 index 00000000..57e2fe67 Binary files /dev/null and b/docs/source/contributor_logos.png differ diff --git a/docs/source/cpp_api.rst b/docs/source/cpp_api.rst index ba6e5b12..8141e21f 100644 --- a/docs/source/cpp_api.rst +++ b/docs/source/cpp_api.rst @@ -3,39 +3,75 @@ API Documentation for C++ This page provides full documentation for the C++ API of MUSCLE3. -Note that in a few places, classes are referred to as -``libmuscle::_MUSCLE_IMPL_NS::`` or ``ymmsl::impl::``. This -is a bug in the documentation rendering process, the class is actually -available as ``libmuscle::`` and should be used as such. - - Namespace libmuscle ------------------- -.. doxygenclass:: libmuscle::_MUSCLE_IMPL_NS::Data -.. doxygenclass:: libmuscle::_MUSCLE_IMPL_NS::DataConstRef -.. doxygenclass:: libmuscle::_MUSCLE_IMPL_NS::Instance -.. doxygenenum:: libmuscle::_MUSCLE_IMPL_NS::InstanceFlags -.. doxygenclass:: libmuscle::_MUSCLE_IMPL_NS::Message -.. doxygentypedef:: libmuscle::_MUSCLE_IMPL_NS::PortsDescription +Data +```` +.. doxygenclass:: libmuscle::Data + +DataConstRef +```````````` +.. doxygenclass:: libmuscle::DataConstRef + +Instance +```````` +.. doxygenclass:: libmuscle::Instance + +InstanceFlags +````````````` +.. doxygenenum:: libmuscle::InstanceFlags + +Message +``````` +.. doxygenclass:: libmuscle::Message + +PortsDescription +```````````````` +.. doxygentypedef:: libmuscle::PortsDescription Namespace ymmsl --------------- -.. doxygenfunction:: ymmsl::impl::allows_sending -.. doxygenfunction:: ymmsl::impl::allows_receiving - -.. doxygenclass:: ymmsl::impl::Conduit -.. doxygenclass:: ymmsl::impl::Identifier -.. doxygenfunction:: ymmsl::impl::operator<<(std::ostream&, Identifier const&) -.. doxygenenum:: ymmsl::impl::Operator -.. doxygenstruct:: ymmsl::impl::Port -.. doxygenclass:: ymmsl::impl::Reference -.. doxygenfunction:: ymmsl::impl::operator<<(std::ostream&, Reference const&) -.. doxygenclass:: ymmsl::impl::ReferencePart -.. doxygenclass:: ymmsl::impl::Settings -.. doxygenfunction:: ymmsl::impl::operator<<(std::ostream&, ymmsl::impl::Settings const&) -.. doxygenclass:: ymmsl::impl::SettingValue -.. doxygenfunction:: ymmsl::impl::operator<<(std::ostream&, ymmsl::impl::SettingValue const&) +allows_sending +`````````````` +.. doxygenfunction:: ymmsl::allows_sending + +allows_receiving +```````````````` +.. doxygenfunction:: ymmsl::allows_receiving + +Conduit +``````` +.. doxygenclass:: ymmsl::Conduit + +Identifier +`````````` +.. doxygenclass:: ymmsl::Identifier +.. 
doxygenfunction:: ymmsl::operator<<(std::ostream&, Identifier const&) + +Operator +```````` +.. doxygenenum:: ymmsl::Operator + +Port +```` +.. doxygenstruct:: ymmsl::Port + +Reference +````````` +.. doxygenclass:: ymmsl::Reference +.. doxygenfunction:: ymmsl::operator<<(std::ostream&, Reference const&) +.. doxygenclass:: ymmsl::ReferencePart + +Settings +```````` +.. doxygenclass:: ymmsl::Settings +.. doxygenfunction:: ymmsl::operator<<(std::ostream&, ymmsl::Settings const&) + +SettingValue +```````````` +.. doxygenclass:: ymmsl::SettingValue +.. doxygenfunction:: ymmsl::operator<<(std::ostream&, ymmsl::SettingValue const&) diff --git a/docs/source/examples/.gitignore b/docs/source/examples/.gitignore index 5d3ee3e1..18937208 100644 --- a/docs/source/examples/.gitignore +++ b/docs/source/examples/.gitignore @@ -1,3 +1,4 @@ rd_implementations.ymmsl benchmark_implementations.ymmsl +dispatch_implementations.ymmsl run_* diff --git a/docs/source/examples/Makefile b/docs/source/examples/Makefile index d308b42b..1f948a6e 100644 --- a/docs/source/examples/Makefile +++ b/docs/source/examples/Makefile @@ -69,7 +69,7 @@ endif .PHONY: base -base: python rd_implementations.ymmsl benchmark_implementations.ymmsl +base: python rd_implementations.ymmsl benchmark_implementations.ymmsl dispatch_implementations.ymmsl @@ -106,6 +106,7 @@ clean: $(MAKE) -C python clean rm -f rd_implementations.ymmsl rm -f benchmark_implementations.ymmsl + rm -f dispatch_implementations.ymmsl rm -rf run_*/ @@ -129,6 +130,7 @@ test_cpp: base cpp $$(ls $$(ls -d run_checkpointing_reaction_diffusion_cpp* | tail -n1)/snapshots/*.ymmsl | head -n1) . python/build/venv/bin/activate && muscle_manager --start-all rd_implementations.ymmsl rd_python_cpp.ymmsl rd_settings.ymmsl . python/build/venv/bin/activate && muscle_manager --start-all rd_implementations.ymmsl rdmc_cpp.ymmsl rdmc_settings.ymmsl + . 
python/build/venv/bin/activate && muscle_manager --start-all dispatch_implementations.ymmsl dispatch_cpp.ymmsl .PHONY: test_cpp_mpi test_cpp_mpi: base cpp_mpi diff --git a/docs/source/examples/cpp/buffer.cpp b/docs/source/examples/cpp/buffer.cpp new file mode 100644 index 00000000..8cd2d4da --- /dev/null +++ b/docs/source/examples/cpp/buffer.cpp @@ -0,0 +1,31 @@ +#include "libmuscle/libmuscle.hpp" +#include "ymmsl/ymmsl.hpp" + +#include "unistd.h" + + +using libmuscle::Data; +using libmuscle::Instance; +using libmuscle::Message; +using ymmsl::Operator; + + +int main(int argc, char * argv[]) { + Instance instance(argc, argv, { + {Operator::F_INIT, {"in"}}, + {Operator::O_F, {"out"}}}); + + while (instance.reuse_instance()) { + // F_INIT + Message msg = instance.receive("in", Message(0.0, Data("Testing"))); + + // S + usleep(250000); + + // O_F + instance.send("out", msg); + } + + return 0; +} + diff --git a/docs/source/examples/cpp/build/Makefile b/docs/source/examples/cpp/build/Makefile index e6fd6332..a52fd13f 100644 --- a/docs/source/examples/cpp/build/Makefile +++ b/docs/source/examples/cpp/build/Makefile @@ -5,7 +5,9 @@ MPI_CXXFLAGS := -std=c++14 -g $(shell pkg-config --cflags libmuscle_mpi ymmsl) MPI_LDFLAGS := $(shell pkg-config --libs libmuscle_mpi ymmsl) -binaries := reaction diffusion mc_driver load_balancer checkpointing_reaction checkpointing_diffusion benchmark +binaries := reaction diffusion mc_driver load_balancer +binaries += checkpointing_reaction checkpointing_diffusion +binaries += benchmark buffer mpi_binaries := reaction_mpi diff --git a/docs/source/examples/dispatch_cpp.ymmsl b/docs/source/examples/dispatch_cpp.ymmsl new file mode 100644 index 00000000..304de8c8 --- /dev/null +++ b/docs/source/examples/dispatch_cpp.ymmsl @@ -0,0 +1,34 @@ +ymmsl_version: v0.1 + +model: + name: dispatch_cpp + + components: + component1: + implementation: buffer_cpp + ports: + o_f: out + + component2: + implementation: buffer_cpp + ports: + f_init: in + o_f: out + + component3: + implementation: buffer_cpp + ports: + f_init: in + + conduits: + component1.out: component2.in + component2.out: component3.in + +resources: + component1: + threads: 1 + component2: + threads: 1 + component3: + threads: 1 + diff --git a/docs/source/examples/dispatch_implementations.ymmsl.in b/docs/source/examples/dispatch_implementations.ymmsl.in new file mode 100644 index 00000000..553db472 --- /dev/null +++ b/docs/source/examples/dispatch_implementations.ymmsl.in @@ -0,0 +1,8 @@ +ymmsl_version: v0.1 + +implementations: + buffer_cpp: + env: + +LD_LIBRARY_PATH: :MUSCLE3_HOME/lib + executable: MUSCLE3_EXAMPLES/cpp/build/buffer + diff --git a/docs/source/examples/python/Makefile b/docs/source/examples/python/Makefile index 48f27607..55f3a218 100644 --- a/docs/source/examples/python/Makefile +++ b/docs/source/examples/python/Makefile @@ -7,6 +7,7 @@ test: . build/venv/bin/activate && DONTPLOT=1 python3 reaction_diffusion.py . build/venv/bin/activate && DONTPLOT=1 python3 reaction_diffusion_qmc.py . build/venv/bin/activate && DONTPLOT=1 python3 interact_coupling.py + . 
build/venv/bin/activate && DONTPLOT=1 python3 dispatch.py .PHONY: clean clean: diff --git a/docs/source/examples/python/dispatch.py b/docs/source/examples/python/dispatch.py new file mode 100644 index 00000000..ea74e245 --- /dev/null +++ b/docs/source/examples/python/dispatch.py @@ -0,0 +1,54 @@ +import logging +import time + +from libmuscle import Instance, Message +from libmuscle.runner import run_simulation +from ymmsl import ( + Component, Conduit, Configuration, Model, Operator, Ports, Settings) + + +def buffer() -> None: + """A component that passes on its input to its output. + + If the input is not connected, it'll generate a message. + """ + instance = Instance({ + Operator.F_INIT: ['in'], + Operator.O_F: ['out']}) + + while instance.reuse_instance(): + # F_INIT + msg = instance.receive('in', default=Message(0.0, data='Testing')) + + # S + time.sleep(0.25) + + # O_F + instance.send('out', msg) + + +if __name__ == '__main__': + logging.basicConfig() + logging.getLogger().setLevel(logging.INFO) + + components = [ + Component( + 'component1', 'buffer', None, + Ports(o_f=['out'])), + Component( + 'component2', 'buffer', None, + Ports(f_init=['in'], o_f=['out'])), + Component( + 'component3', 'buffer', None, + Ports(f_init=['in']))] + + conduits = [ + Conduit('component1.out', 'component2.in'), + Conduit('component2.out', 'component3.in')] + + model = Model('dispatch', components, conduits) + settings = Settings({}) + configuration = Configuration(model, settings) + + implementations = {'buffer': buffer} + run_simulation(configuration, implementations) diff --git a/docs/source/index.rst b/docs/source/index.rst index 3eb13295..1b475e14 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -14,6 +14,44 @@ multiscale simulation. Simulation models can be as simple as a single Python file, or as complex as a combination of multiple separate simulation codes written in C++ or Fortran, and running on an HPC machine. +Contributors +------------ + +.. image:: contributor_logos.png + :alt: Logos of the University of Amsterdam Computational Science Lab, Netherlands eScience Center, and Ignition Computing + +University of Amsterdam Computational Science Lab + Original concept, Multiscale Modelling and Simulation Framework (MMSF) coupling + theory, original MUSCLE, MUSCLE2. + +Netherlands eScience Center + MUSCLE3 implementation, teaching materials. + +Ignition Computing + Checkpointing implementation and additional development. + + +This work was supported by the Netherlands eScience Center and NWO under grant number +27015G01. + +We would like to acknowledge the contribution by The ITER Organization of results of +work carried out within the framework of ITER contract IO/22/CT/4300002587. The views +and opinions expressed herein do not necessarily reflect those of the ITER Organization. + +Academic collaboration +---------------------- + +Please contact `prof. Alfons Hoekstra (UvA CSL) `_ +and/or `Lourens Veen (NLeSC) `_. + +Commercial support +------------------ + +Please contact `Ignition Computing `_. + +Citing MUSCLE3 +-------------- + If you use MUSCLE3 for scientific work, please `cite the version of the MUSCLE3 software you used `_ and the following paper: diff --git a/docs/source/profiling.rst b/docs/source/profiling.rst index 6e7b678a..93bc35f3 100644 --- a/docs/source/profiling.rst +++ b/docs/source/profiling.rst @@ -62,11 +62,11 @@ are described here. 
Plotting statistics from the command line ----------------------------------------- -The most simplest way of examining performance data gathered by MUSCLE3 is -through the ``muscle3 profile`` command from the shell. If you have done a run, -then you should have a run directory containing a ``performance.sqlite`` file. -If you have MUSCLE3 available in your environment (only the Python installation -is needed) then you have the ``muscle3 profile`` command available to show +The simplest way of examining performance data gathered by MUSCLE3 is through +the ``muscle3 profile`` command from the shell. If you have done a run, then +you should have a run directory containing a ``performance.sqlite`` file. If +you have MUSCLE3 available in your environment (only the Python installation is +needed) then you have the ``muscle3 profile`` command available to show per-instance and per-core statistics as well as a timeline of events. Per-instance time spent @@ -80,10 +80,10 @@ Per-instance time spent muscle3 profile --instances /path/to/performance.sqlite -With ``--instances``, the plot will show for each instance how much time it -spent in total on computing, communicating and waiting. This plot gives an idea -of where most of the computing is done, and which components you need to -optimise to get an answer sooner. +With ``--instances`` or ``-i``, the plot will show for each instance how much +time it spent in total on computing, communicating and waiting. This plot gives +an idea of where most of the computing is done, and which components you need +to optimise to get an answer sooner. In many models, you will find that there's one component that takes up most of the compute time, and others that spend most of their time waiting and then do a @@ -128,9 +128,10 @@ Resource usage If you are running on a large computer, then it may be interesting to see how you are using the resources allocated to you. The command ``muscle3 profile --resources performance.sqlite`` will produce a plot showing for each core how -much time it spent running the various instances. This gives an idea of which -component used the most resources, and tells you what you should optimise if -you're trying to reduce the number of core hours spent. +much time it spent running the various instances (``-r`` for short also works). +This gives an idea of which component used the most resources, and tells you +what you should optimise if you're trying to reduce the number of core hours +spent. The total time shown per core doesn't necessarily match the total run time, as cores may be idle during the simulation. This can happen for example if @@ -150,14 +151,11 @@ Event timeline muscle3 profile --timeline /path/to/performance.sqlite -If you really want to get into the details, ``--timeline`` shows a timeline of -profiling events. This visualises the raw data from the database, showing -exactly when each instance sent and received data, when it was waiting for -input, and when it computed. The meaning of the event types shown is as follows: - -RUNNING - The instance was running, meaning that it was actively computing or doing - non-MUSCLE3 communication. +If you really want to get into the details, ``--timeline`` or ``-t`` shows a +timeline of profiling events. This visualises the raw data from the database, +showing exactly when each instance sent and received data, when it was waiting +for input, and when it computed. 
The meaning of the event types shown is as +follows: REGISTER The instance contacted the manager to share its location on the network, so @@ -167,6 +165,22 @@ CONNECT The instance asked the manager who to communicate with, and set up connections to these other instances. +RUNNING + The instance was running, meaning that it was actively computing or doing + non-MUSCLE3 communication. + +SHUTDOWN_WAIT + The instance was waiting to receive the information it needed to determine + that it should shut down, rather than run the reuse loop again. + +DISCONNECT_WAIT + The instance was waiting for the instances it communicates with to + acknowledge that it would be shutting down. This may take a while if those + other instances are busy doing calculations or talking to someone else. + +SHUTDOWN + The instance was shutting down its MUSCLE3 communications. + DEREGISTER The instance contacted the manager to say that it was ending it run. @@ -204,7 +218,7 @@ Analysis with Python If you want to get quantitative data, or just want to make your own plots, then you can use MUSCLE3's Python API. It contains several useful functions for extracting information and statistics from a profiling database. They are -collected in the :py:class:`libmuscle.ProfileDatabasa` class. +collected in the :py:class:`libmuscle.ProfileDatabase` class. Per-instance statistics ``````````````````````` @@ -420,12 +434,21 @@ Database format version +----------------+-------------------+ This table stores a single row containing the version of the database format -used in this file. The current version is 1.0. This uses semantic versioning, so -incompatible future formats will have a higher major version. Compatible -changes, including addition of columns to existing tables, will increment the -minor version number. Note that this means that ``SELECT * FROM ...`` may give a -different result for different minor versions. If that's not acceptable, specify -the columns you want explicitly. +used in this file. This uses semantic versioning, so incompatible future formats +will have a higher major version. Compatible changes, including addition of +columns to existing tables, will increment the minor version number. Note that +this means that ``SELECT * FROM ...`` may give a different result for different +minor versions. To make your code compatible with future minor versions, it's a +good idea to specify the columns you want explicitly. + +Here is a brief version history: + +Version 1.0 (written by MUSCLE3 0.7.0) + Initial release. + +Version 1.1 (written by MUSCLE3 0.7.1) + Added new ``SHUTDOWN_WAIT``, ``DISCONNECT_WAIT`` and ``SHUTDOWN`` events. + No changes to the tables. 
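As a minimal sketch of the column-selection advice above (the table and column names below are assumptions for illustration only, not the actual ``performance.sqlite`` schema), querying the profiling database with explicit columns might look like this::

    import sqlite3

    # Open the profiling database written by the MUSCLE3 manager
    # (path is an example; use the performance.sqlite in your run directory).
    conn = sqlite3.connect('performance.sqlite')

    # Select explicit columns rather than using SELECT *, so the query keeps
    # working if a future minor format version adds columns to this table.
    # 'events', 'instance', 'event_type', 'start_time' and 'stop_time' are
    # hypothetical names used purely for illustration.
    rows = conn.execute(
        'SELECT instance, event_type, start_time, stop_time FROM events'
    ).fetchall()

    for instance, event_type, start, stop in rows:
        print(instance, event_type, stop - start)

    conn.close()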
Formatted events ```````````````` diff --git a/libmuscle/cpp/src/libmuscle/bindings/libmuscle_fortran_c.cpp b/libmuscle/cpp/src/libmuscle/bindings/libmuscle_fortran_c.cpp index fdfd1dbe..f9d7d847 100644 --- a/libmuscle/cpp/src/libmuscle/bindings/libmuscle_fortran_c.cpp +++ b/libmuscle/cpp/src/libmuscle/bindings/libmuscle_fortran_c.cpp @@ -83,7 +83,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_copy_(std::intptr_t value) { std::intptr_t LIBMUSCLE_DataConstRef_create_grid_logical_a_(bool * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -91,7 +91,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_logical_a_(bool * data_array, s std::intptr_t LIBMUSCLE_DataConstRef_create_grid_int4_a_(int32_t * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -99,7 +99,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_int4_a_(int32_t * data_array, s std::intptr_t LIBMUSCLE_DataConstRef_create_grid_int8_a_(int64_t * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -107,7 +107,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_int8_a_(int64_t * data_array, s std::intptr_t LIBMUSCLE_DataConstRef_create_grid_real4_a_(float * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -115,7 +115,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_real4_a_(float * data_array, st std::intptr_t LIBMUSCLE_DataConstRef_create_grid_real8_a_(double * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -134,7 +134,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_logical_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto 
data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -171,7 +171,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_int4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -208,7 +208,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_int8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -245,7 +245,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_real4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -282,7 +282,7 @@ std::intptr_t LIBMUSCLE_DataConstRef_create_grid_real8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1333,7 +1333,7 @@ std::intptr_t LIBMUSCLE_Data_create_copy_(std::intptr_t value) { std::intptr_t LIBMUSCLE_Data_create_grid_logical_a_(bool * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1341,7 +1341,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_logical_a_(bool * data_array, std::size std::intptr_t LIBMUSCLE_Data_create_grid_int4_a_(int32_t * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1349,7 +1349,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_int4_a_(int32_t * data_array, std::size std::intptr_t LIBMUSCLE_Data_create_grid_int8_a_(int64_t * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1357,7 +1357,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_int8_a_(int64_t * data_array, std::size std::intptr_t LIBMUSCLE_Data_create_grid_real4_a_(float * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p 
= const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1365,7 +1365,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_real4_a_(float * data_array, std::size_ std::intptr_t LIBMUSCLE_Data_create_grid_real8_a_(double * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1384,7 +1384,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_logical_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1421,7 +1421,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_int4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1458,7 +1458,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_int8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1495,7 +1495,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_real4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1532,7 +1532,7 @@ std::intptr_t LIBMUSCLE_Data_create_grid_real8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); diff --git a/libmuscle/cpp/src/libmuscle/bindings/libmuscle_mpi_fortran_c.cpp b/libmuscle/cpp/src/libmuscle/bindings/libmuscle_mpi_fortran_c.cpp index 7fde7582..7d5f01e8 100644 --- a/libmuscle/cpp/src/libmuscle/bindings/libmuscle_mpi_fortran_c.cpp +++ b/libmuscle/cpp/src/libmuscle/bindings/libmuscle_mpi_fortran_c.cpp @@ -83,7 +83,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_copy_(std::intptr_t value) { std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_logical_a_(bool * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -91,7 +91,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_logical_a_(bool * data_arra std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_int4_a_(int32_t * data_array, std::size_t * data_array_shape, 
std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -99,7 +99,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_int4_a_(int32_t * data_arra std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_int8_a_(int64_t * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -107,7 +107,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_int8_a_(int64_t * data_arra std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_real4_a_(float * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -115,7 +115,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_real4_a_(float * data_array std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_real8_a_(double * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); DataConstRef * result = new DataConstRef(DataConstRef::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -134,7 +134,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_logical_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -171,7 +171,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_int4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -208,7 +208,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_int8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -245,7 +245,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_real4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, 
index_name_1_size); @@ -282,7 +282,7 @@ std::intptr_t LIBMUSCLE_MPI_DataConstRef_create_grid_real8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1333,7 +1333,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_copy_(std::intptr_t value) { std::intptr_t LIBMUSCLE_MPI_Data_create_grid_logical_a_(bool * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1341,7 +1341,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_logical_a_(bool * data_array, std:: std::intptr_t LIBMUSCLE_MPI_Data_create_grid_int4_a_(int32_t * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1349,7 +1349,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_int4_a_(int32_t * data_array, std:: std::intptr_t LIBMUSCLE_MPI_Data_create_grid_int8_a_(int64_t * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1357,7 +1357,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_int8_a_(int64_t * data_array, std:: std::intptr_t LIBMUSCLE_MPI_Data_create_grid_real4_a_(float * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1365,7 +1365,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_real4_a_(float * data_array, std::s std::intptr_t LIBMUSCLE_MPI_Data_create_grid_real8_a_(double * data_array, std::size_t * data_array_shape, std::size_t data_array_ndims) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); Data * result = new Data(Data::grid(data_array_p, data_array_shape_v, {}, libmuscle::StorageOrder::first_adjacent)); return reinterpret_cast(result); } @@ -1384,7 +1384,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_logical_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, 
index_name_1_size); @@ -1421,7 +1421,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_int4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1458,7 +1458,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_int8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1495,7 +1495,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_real4_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); @@ -1532,7 +1532,7 @@ std::intptr_t LIBMUSCLE_MPI_Data_create_grid_real8_n_( ) { std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast(data_array); + auto data_array_p = const_cast(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); diff --git a/libmuscle/cpp/src/libmuscle/communicator.cpp b/libmuscle/cpp/src/libmuscle/communicator.cpp index 254d0290..03907899 100644 --- a/libmuscle/cpp/src/libmuscle/communicator.cpp +++ b/libmuscle/cpp/src/libmuscle/communicator.cpp @@ -314,10 +314,14 @@ void Communicator::shutdown() { for (auto & client : clients_) client.second->close(); + ProfileEvent wait_event(ProfileEventType::disconnect_wait, ProfileTimestamp()); post_office_.wait_for_receivers(); + profiler_.record_event(std::move(wait_event)); + ProfileEvent shutdown_event(ProfileEventType::shutdown, ProfileTimestamp()); for (auto & server : servers_) server->close(); + profiler_.record_event(std::move(shutdown_event)); } Communicator::PortMessageCounts Communicator::get_message_counts() { diff --git a/libmuscle/cpp/src/libmuscle/data.hpp b/libmuscle/cpp/src/libmuscle/data.hpp index d4e83a3b..30adcebd 100644 --- a/libmuscle/cpp/src/libmuscle/data.hpp +++ b/libmuscle/cpp/src/libmuscle/data.hpp @@ -623,6 +623,18 @@ class Data : public DataConstRef { // create from scalar type using DataConstRef::DataConstRef; + /** Copy-construct a Data object. + * + * Explicit default avoids a compiler warning on some compilers. + */ + Data(Data const &) = default; + + /** Move-construct a Data object. + * + * Explicit default avoids a compiler warning on some compilers. + */ + Data(Data &&) = default; + /** Create a Data object containing a grid object. * * This creates a DataConstRef that represents a grid or array of a diff --git a/libmuscle/cpp/src/libmuscle/instance.cpp b/libmuscle/cpp/src/libmuscle/instance.cpp index 19de8f47..a3d4f5f5 100644 --- a/libmuscle/cpp/src/libmuscle/instance.cpp +++ b/libmuscle/cpp/src/libmuscle/instance.cpp @@ -849,11 +849,16 @@ bool Instance::Impl::have_f_init_connections_() { * @return true iff no ClosePort messages were received. 
*/ bool Instance::Impl::pre_receive_() { + ProfileEvent sw_event(ProfileEventType::shutdown_wait, ProfileTimestamp()); + bool all_ports_open = receive_settings_(); pre_receive_f_init_(); for (auto const & ref_msg : f_init_cache_) if (is_close_port(ref_msg.second.data())) all_ports_open = false; + + if (!all_ports_open) + profiler_->record_event(std::move(sw_event)); return all_ports_open; } diff --git a/libmuscle/cpp/src/libmuscle/logger.cpp b/libmuscle/cpp/src/libmuscle/logger.cpp index c23b1a0a..5f1d9795 100644 --- a/libmuscle/cpp/src/libmuscle/logger.cpp +++ b/libmuscle/cpp/src/libmuscle/logger.cpp @@ -37,7 +37,7 @@ void Logger::set_local_level(LogLevel level) { local_level_ = level; } -void Logger::append_args_(std::ostringstream & s) {} +void Logger::append_args_(std::ostringstream &) {} } } diff --git a/libmuscle/cpp/src/libmuscle/mcp/transport_client.cpp b/libmuscle/cpp/src/libmuscle/mcp/transport_client.cpp index 3ffbd62b..bacc1c15 100644 --- a/libmuscle/cpp/src/libmuscle/mcp/transport_client.cpp +++ b/libmuscle/cpp/src/libmuscle/mcp/transport_client.cpp @@ -5,7 +5,7 @@ namespace libmuscle { namespace _MUSCLE_IMPL_NS { namespace mcp { -bool TransportClient::can_connect_to(std::string const & location) { +bool TransportClient::can_connect_to(std::string const &) { return false; } diff --git a/libmuscle/cpp/src/libmuscle/mmp_client.cpp b/libmuscle/cpp/src/libmuscle/mmp_client.cpp index 5b82c9de..b5a99fe1 100644 --- a/libmuscle/cpp/src/libmuscle/mmp_client.cpp +++ b/libmuscle/cpp/src/libmuscle/mmp_client.cpp @@ -33,7 +33,6 @@ using ymmsl::SettingValue; namespace { - const float connection_timeout = 300.0f; const std::chrono::milliseconds peer_timeout(600000); const int peer_interval_min = 5000; // milliseconds const int peer_interval_max = 10000; // milliseconds @@ -325,6 +324,8 @@ void MMPClient::deregister_instance() { } DataConstRef MMPClient::call_manager_(DataConstRef const & request) { + std::lock_guard lock(mutex_); + msgpack::sbuffer sbuf; msgpack::pack(sbuf, request); diff --git a/libmuscle/cpp/src/libmuscle/mmp_client.hpp b/libmuscle/cpp/src/libmuscle/mmp_client.hpp index 0ba9245e..c082809b 100644 --- a/libmuscle/cpp/src/libmuscle/mmp_client.hpp +++ b/libmuscle/cpp/src/libmuscle/mmp_client.hpp @@ -6,6 +6,7 @@ #include +#include #include #include #include @@ -28,7 +29,9 @@ namespace libmuscle { namespace _MUSCLE_IMPL_NS { * This class connects to the Manager and communicates with it on behalf of the * rest of libmuscle. * - * It manages the connection, and encodes and decodes MsgPack. + * It manages the connection, and encodes and decodes MsgPack. Communication is + * protected by an internal mutex, so this class can be called simultaneously + * from different threads. */ class MMPClient { public: @@ -123,6 +126,7 @@ class MMPClient { private: ymmsl::Reference instance_id_; mcp::TcpTransportClient transport_client_; + mutable std::mutex mutex_; /* Helper function that encodes/decodes and calls the manager. 
*/ diff --git a/libmuscle/cpp/src/libmuscle/profiler.cpp b/libmuscle/cpp/src/libmuscle/profiler.cpp index 0ced4c19..5869bf62 100644 --- a/libmuscle/cpp/src/libmuscle/profiler.cpp +++ b/libmuscle/cpp/src/libmuscle/profiler.cpp @@ -31,8 +31,8 @@ Profiler::Profiler(MMPClient & manager) : manager_(manager) , enabled_(true) , events_() - , thread_(communicate_, this) , done_(false) + , thread_(communicate_, this) {} Profiler::~Profiler() { diff --git a/libmuscle/cpp/src/libmuscle/profiler.hpp b/libmuscle/cpp/src/libmuscle/profiler.hpp index 96570f65..618fb034 100644 --- a/libmuscle/cpp/src/libmuscle/profiler.hpp +++ b/libmuscle/cpp/src/libmuscle/profiler.hpp @@ -72,9 +72,9 @@ class Profiler { std::vector events_; std::chrono::steady_clock::time_point next_send_; - std::thread thread_; - std::condition_variable done_cv_; bool done_; + std::condition_variable done_cv_; + std::thread thread_; /* Background thread that ensures regular communication. * diff --git a/libmuscle/cpp/src/libmuscle/profiling.hpp b/libmuscle/cpp/src/libmuscle/profiling.hpp index f46b8db9..91a7f6df 100644 --- a/libmuscle/cpp/src/libmuscle/profiling.hpp +++ b/libmuscle/cpp/src/libmuscle/profiling.hpp @@ -20,12 +20,15 @@ namespace libmuscle { namespace _MUSCLE_IMPL_NS { enum class ProfileEventType { register_ = 0, connect = 4, - deregister = 1, send = 2, receive = 3, receive_wait = 5, receive_transfer = 6, - receive_decode = 7 + receive_decode = 7, + shutdown_wait = 9, + disconnect_wait = 8, + shutdown = 10, + deregister = 1 }; diff --git a/libmuscle/cpp/src/libmuscle/tests/test_profiler.cpp b/libmuscle/cpp/src/libmuscle/tests/test_profiler.cpp index 64c56cbe..1824c653 100644 --- a/libmuscle/cpp/src/libmuscle/tests/test_profiler.cpp +++ b/libmuscle/cpp/src/libmuscle/tests/test_profiler.cpp @@ -29,6 +29,7 @@ std::chrono::steady_clock::duration communication_interval_() { #include #include +#include #include #include #include @@ -211,7 +212,17 @@ TEST(libmuscle_profiler, test_send_to_mock_mmp_client) { TEST(libmuscle_profiler, test_send_timeout) { reset_mocks(); - communication_interval = 40ms; + + std::chrono::steady_clock::duration wait_time; + + if (getenv("CI")) { + communication_interval = 40ms; + wait_time = 500ms; + } + else { + communication_interval = 40ms; + wait_time = 60ms; + } MockMMPClient mock_mmp_client(Reference("test_instance"), ""); Profiler profiler(mock_mmp_client); @@ -220,7 +231,7 @@ TEST(libmuscle_profiler, test_send_timeout) { ProfileEventType::receive, ProfileTimestamp(), ProfileTimestamp()); profiler.record_event(ProfileEvent(e1)); - std::this_thread::sleep_for(50ms); + std::this_thread::sleep_for(wait_time); ASSERT_EQ(mock_mmp_client.last_submitted_profile_events.size(), 1u); ASSERT_EQ(mock_mmp_client.last_submitted_profile_events.at(0), e1); diff --git a/libmuscle/python/libmuscle/communicator.py b/libmuscle/python/libmuscle/communicator.py index 8be5cb88..4ca3c436 100644 --- a/libmuscle/python/libmuscle/communicator.py +++ b/libmuscle/python/libmuscle/communicator.py @@ -413,10 +413,14 @@ def shutdown(self) -> None: for client in self._clients.values(): client.close() + wait_event = ProfileEvent(ProfileEventType.DISCONNECT_WAIT, ProfileTimestamp()) self._post_office.wait_for_receivers() + self._profiler.record_event(wait_event) + shutdown_event = ProfileEvent(ProfileEventType.SHUTDOWN, ProfileTimestamp()) for server in self._servers: server.close() + self._profiler.record_event(shutdown_event) def restore_message_counts(self, port_message_counts: Dict[str, List[int]] ) -> None: diff --git 
a/libmuscle/python/libmuscle/instance.py b/libmuscle/python/libmuscle/instance.py index db0f2f6b..f4aa85fb 100644 --- a/libmuscle/python/libmuscle/instance.py +++ b/libmuscle/python/libmuscle/instance.py @@ -6,7 +6,6 @@ from typing import cast, Dict, List, Optional, Tuple, overload # TODO: import from typing module when dropping support for python 3.7 from typing_extensions import Literal -import warnings from ymmsl import (Identifier, Operator, SettingValue, Port, Reference, Settings) @@ -116,11 +115,6 @@ def __init__( self.__is_shut_down = False self._flags = InstanceFlags(flags) - if InstanceFlags.USES_CHECKPOINT_API in self._flags: - warnings.warn( - 'Checkpointing in MUSCLE3 version 0.6.0 is still in' - ' development: the API may change in a future MUSCLE3' - ' release.') # Note that these are accessed by Muscle3, but otherwise private. self._name, self._index = self.__make_full_name() @@ -246,10 +240,7 @@ def reuse_instance(self) -> bool: self._save_snapshot(None, True, self.__f_init_max_timestamp) if not do_reuse: - self.__close_ports() - self._communicator.shutdown() - self._deregister() - self.__manager.close() + self.__shutdown() self._api_guard.reuse_instance_done(do_reuse) return do_reuse @@ -1014,11 +1005,17 @@ def _pre_receive(self) -> bool: Returns: True iff no ClosePort messages were received. """ + sw_event = ProfileEvent(ProfileEventType.SHUTDOWN_WAIT, ProfileTimestamp()) + all_ports_open = self.__receive_settings() self.__pre_receive_f_init() for message in self._f_init_cache.values(): if isinstance(message.data, ClosePort): all_ports_open = False + + if not all_ports_open: + self._profiler.record_event(sw_event) + return all_ports_open def __receive_settings(self) -> bool: @@ -1251,14 +1248,15 @@ def __close_ports(self) -> None: self.__close_outgoing_ports() self.__close_incoming_ports() - def __shutdown(self, message: str) -> None: + def __shutdown(self, message: Optional[str] = None) -> None: """Shuts down simulation. - This logs the given error message, communicates to the peers - that we're shutting down, and deregisters from the manager. + This logs the given error message, if any, communicates to the + peers that we're shutting down, and deregisters from the manager. 
""" if not self.__is_shut_down: - _logger.critical(message) + if message is not None: + _logger.critical(message) self.__close_ports() self._communicator.shutdown() self._deregister() diff --git a/libmuscle/python/libmuscle/manager/instance_manager.py b/libmuscle/python/libmuscle/manager/instance_manager.py index 2dffec06..7b619db0 100644 --- a/libmuscle/python/libmuscle/manager/instance_manager.py +++ b/libmuscle/python/libmuscle/manager/instance_manager.py @@ -1,7 +1,8 @@ import logging +from pathlib import Path from textwrap import indent from threading import Thread -from typing import Dict, Optional, Union +from typing import Dict, List, Optional, Tuple, Union from multiprocessing import Queue import queue @@ -156,6 +157,9 @@ def cancel_all() -> None: self._requests_out.put(CancelAllRequest()) all_seemingly_okay = False + # Get all results + results: List[Process] = list() + while self._num_running > 0: result = self._results_in.get() @@ -165,48 +169,98 @@ def cancel_all() -> None: ' a bug report.') return False - if result.exit_code != 0: - if result.status == ProcessStatus.CANCELED: - _logger.info( - f'Instance {result.instance} was shut down by' - f' MUSCLE3 because an error occurred elsewhere') - else: - _logger.error( - f'Instance {result.instance} quit with error' - f' {result.exit_code}') - - stderr_file = ( - self._run_dir.instance_dir(result.instance) / - 'stderr.txt') - _logger.error( - 'The last error output of this instance was:') - _logger.error( - '\n' + indent(last_lines(stderr_file, 20), ' ')) - _logger.error( - 'More output may be found in' - f' {self._run_dir.instance_dir(result.instance)}\n' - ) + results.append(result) + if result.status != ProcessStatus.CANCELED: + registered = self._instance_registry.did_register(result.instance) + if result.exit_code != 0 or not registered: cancel_all() + self._num_running -= 1 - elif not self._instance_registry.did_register(result.instance): - _logger.error( - f'Instance {result.instance} quit with no error' - ' (exit code 0), but it never registered with the' - ' manager. Maybe it never created an Instance' - ' object?') - cancel_all() - else: - if result.status == ProcessStatus.CANCELED: + # Summarise outcome + crashes: List[Tuple[Process, Path]] = list() + indirect_crashes: List[Tuple[Process, Path]] = list() + + for result in results: + if result.status == ProcessStatus.CANCELED: + if result.exit_code == 0: _logger.info( f'Instance {result.instance} was not started' f' because of an error elsewhere') else: - _logger.debug(f'Instance {result.instance} finished') - _logger.debug(f'States: {result.status}') - _logger.debug(f'Exit code: {result.exit_code}') - _logger.debug(f'Error msg: {result.error_msg}') + _logger.info( + f'Instance {result.instance} was shut down by' + f' MUSCLE3 because an error occurred elsewhere') + else: + stderr_file = ( + self._run_dir.instance_dir(result.instance) / + 'stderr.txt') + if result.exit_code == 0: + if self._instance_registry.did_register(result.instance): + _logger.info( + f'Instance {result.instance} finished with' + ' exit code 0') + else: + _logger.error( + f'Instance {result.instance} quit with no error' + ' (exit code 0), but it never registered with the' + ' manager. Maybe it never created an Instance' + ' object?') + crashes.append((result, stderr_file)) + else: + with stderr_file.open() as f: + peer_crash = any(['peer crash?' 
in line for line in f]) + + if peer_crash: + _logger.warning( + f'Instance {result.instance} crashed, likely because' + f' an error occurred elsewhere.') + indirect_crashes.append((result, stderr_file)) + else: + _logger.error( + f'Instance {result.instance} quit with exit code' + f' {result.exit_code}') + crashes.append((result, stderr_file)) + + _logger.debug(f'Status: {result.status}') + _logger.debug(f'Exit code: {result.exit_code}') + _logger.debug(f'Error msg: {result.error_msg}') + + # Show errors from crashed components + if crashes: + for result, stderr_file in crashes: + _logger.error( + f'The last error output of {result.instance} was:') + _logger.error( + '\n' + indent(last_lines(stderr_file, 20), ' ')) + _logger.error( + 'More output may be found in' + f' {self._run_dir.instance_dir(result.instance)}\n' + ) + else: + # Possibly a component exited without error, but prematurely. If this + # caused ancillary crashes due to dropped connections, then the logs + # of those will give a hint as to what the problem may be, so print + # those instead. + _logger.error( + 'At this point, one or more instances crashed because they' + ' lost their connection to another instance, but no other' + ' crashing instance was found that could have caused this.') + _logger.error( + 'This means that either another instance quit before it was' + ' supposed to, but with exit code 0, or there was an actual' + ' network problem that caused the connection to drop.') + _logger.error( + 'Here is the output of the instances that lost connection:') + for result, stderr_file in indirect_crashes: + _logger.error( + f'The last error output of {result.instance} was:') + _logger.error( + '\n' + indent(last_lines(stderr_file, 20), ' ')) + _logger.error( + 'More output may be found in' + f' {self._run_dir.instance_dir(result.instance)}\n' + ) - self._num_running -= 1 return all_seemingly_okay def shutdown(self) -> None: diff --git a/libmuscle/python/libmuscle/manager/profile_database.py b/libmuscle/python/libmuscle/manager/profile_database.py index 9ee9618d..a1c7a535 100644 --- a/libmuscle/python/libmuscle/manager/profile_database.py +++ b/libmuscle/python/libmuscle/manager/profile_database.py @@ -1,5 +1,4 @@ from collections import defaultdict -import logging from pathlib import Path import sqlite3 import threading @@ -7,9 +6,6 @@ from typing import Any, cast, Dict, List, Optional, Tuple, Type, Union -_logger = logging.getLogger(__name__) - - class ProfileDatabase: """Accesses a profiling database. @@ -107,9 +103,16 @@ def instance_stats( cur.execute( "SELECT instance, start_time" " FROM all_events" - " WHERE type = 'DEREGISTER'") + " WHERE type = 'SHUTDOWN_WAIT'") stop_run = dict(cur.fetchall()) + if not stop_run: + cur.execute( + "SELECT instance, start_time" + " FROM all_events" + " WHERE type = 'DEREGISTER'") + stop_run = dict(cur.fetchall()) + cur.execute( "SELECT instance, SUM(stop_time - start_time)" " FROM all_events" @@ -164,8 +167,6 @@ def resource_stats(self) -> Dict[str, Dict[str, float]]: i: r + c for i, r, c in zip(instances, run_times, comm_times)} - _logger.info(active_times) - cur = self._get_cursor() cur.execute("BEGIN TRANSACTION") cur.execute( @@ -176,8 +177,6 @@ def resource_stats(self) -> Dict[str, Dict[str, float]]: for name, node, core in cur.fetchall(): instances_by_core[':'.join([node, str(core)])].append(name) - _logger.info(instances_by_core) - cur.execute("COMMIT") cur.close() @@ -290,10 +289,11 @@ def time_taken( Args: etype: Type of event to get the starting point from. 
- Possible values: `'REGISTER'`, `'CONNECT'`, - `'DEREGISTER'`, `'SEND'`, `'RECEIVE'`, `'RECEIVE_WAIT'`, - `'RECEIVE_TRANSFER'`, `'RECEIVE_DECODE'`. See the - documentation for a description of each. + Possible values: `'REGISTER'`, `'CONNECT'`, `'SHUTDOWN_WAIT'`, + `'DISCONNECT_WAIT'`, `'SHUTDOWN'`, `'DEREGISTER'`, `'SEND'`, + `'RECEIVE'`, `'RECEIVE_WAIT'`, `'RECEIVE_TRANSFER'`, + `'RECEIVE_DECODE'`. See the documentation for a description + of each. instance: Name of the instance to get the event from. You can use `%` as a wildcard matching anything. For example, `'macro[%'` will match all instances of the diff --git a/libmuscle/python/libmuscle/manager/profile_store.py b/libmuscle/python/libmuscle/manager/profile_store.py index 5efa69bd..036dea85 100644 --- a/libmuscle/python/libmuscle/manager/profile_store.py +++ b/libmuscle/python/libmuscle/manager/profile_store.py @@ -194,7 +194,7 @@ def _init_database(self) -> None: " minor_version INTEGER NOT NULL)") cur.execute( "INSERT INTO muscle3_format(major_version, minor_version)" - " VALUES (1, 0)") + " VALUES (1, 1)") cur.execute( "CREATE TABLE instances (" @@ -241,6 +241,8 @@ def _init_database(self) -> None: cur.execute("CREATE INDEX instances_oid_idx ON instances(oid)") + cur.execute("CREATE INDEX events_start_time_idx ON events(start_time)") + cur.execute( "CREATE VIEW all_events" " AS SELECT" diff --git a/libmuscle/python/libmuscle/manager/test/test_profile_store.py b/libmuscle/python/libmuscle/manager/test/test_profile_store.py index 63795d90..2961b6d9 100644 --- a/libmuscle/python/libmuscle/manager/test/test_profile_store.py +++ b/libmuscle/python/libmuscle/manager/test/test_profile_store.py @@ -18,7 +18,7 @@ def test_create_profile_store(tmp_path): cur.execute("SELECT major_version, minor_version FROM muscle3_format") major, minor = cur.fetchone() assert major == 1 - assert minor == 0 + assert minor == 1 cur.execute("SELECT oid, name FROM event_types") etypes = cur.fetchall() diff --git a/libmuscle/python/libmuscle/mmp_client.py b/libmuscle/python/libmuscle/mmp_client.py index d9ebe698..34238a87 100644 --- a/libmuscle/python/libmuscle/mmp_client.py +++ b/libmuscle/python/libmuscle/mmp_client.py @@ -1,6 +1,7 @@ import dataclasses from pathlib import Path from random import uniform +from threading import Lock from time import perf_counter, sleep from typing import Any, Dict, Iterable, List, Optional, Tuple @@ -17,7 +18,6 @@ from libmuscle.snapshot import SnapshotMetadata -CONNECTION_TIMEOUT = 300 PEER_TIMEOUT = 600 PEER_INTERVAL_MIN = 5.0 PEER_INTERVAL_MAX = 10.0 @@ -106,6 +106,9 @@ class MMPClient(): It manages the connection, and converts between our native types and the gRPC generated types. + + Communication is protected by an internal lock, so this class can + be called simultaneously from different threads. 
""" def __init__(self, instance_id: Reference, location: str) -> None: """Create an MMPClient @@ -115,6 +118,7 @@ def __init__(self, instance_id: Reference, location: str) -> None: """ self._instance_id = instance_id self._transport_client = TcpTransportClient(location) + self._mutex = Lock() def close(self) -> None: """Close the connection @@ -280,6 +284,7 @@ def _call_manager(self, request: Any) -> Any: Returns: The decoded response """ - encoded_request = msgpack.packb(request, use_bin_type=True) - response, _ = self._transport_client.call(encoded_request) - return msgpack.unpackb(response, raw=False) + with self._mutex: + encoded_request = msgpack.packb(request, use_bin_type=True) + response, _ = self._transport_client.call(encoded_request) + return msgpack.unpackb(response, raw=False) diff --git a/libmuscle/python/libmuscle/profiling.py b/libmuscle/python/libmuscle/profiling.py index 9398ec95..d3c1f9a5 100644 --- a/libmuscle/python/libmuscle/profiling.py +++ b/libmuscle/python/libmuscle/profiling.py @@ -9,12 +9,15 @@ class ProfileEventType(Enum): """Profiling event types for MUSCLE3.""" REGISTER = 0 CONNECT = 4 - DEREGISTER = 1 SEND = 2 RECEIVE = 3 RECEIVE_WAIT = 5 RECEIVE_TRANSFER = 6 RECEIVE_DECODE = 7 + SHUTDOWN_WAIT = 9 + DISCONNECT_WAIT = 8 + SHUTDOWN = 10 + DEREGISTER = 1 class ProfileTimestamp: diff --git a/libmuscle/python/libmuscle/py.typed b/libmuscle/python/libmuscle/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/libmuscle/python/libmuscle/test/test_mmp_client.py b/libmuscle/python/libmuscle/test/test_mmp_client.py index 138efa0a..721e26bd 100644 --- a/libmuscle/python/libmuscle/test/test_mmp_client.py +++ b/libmuscle/python/libmuscle/test/test_mmp_client.py @@ -18,11 +18,10 @@ def test_init() -> None: def test_connection_fail() -> None: - with patch('libmuscle.mmp_client.CONNECTION_TIMEOUT', 1): - with pytest.raises(RuntimeError): - # Port 255 is reserved and privileged, so there's probably - # nothing there. - MMPClient(Reference([]), 'tcp:localhost:255') + with pytest.raises(RuntimeError): + # Port 255 is reserved and privileged, so there's probably + # nothing there. 
+ MMPClient(Reference([]), 'tcp:localhost:255') def test_submit_log_message(mocked_mmp_client, profile_data) -> None: diff --git a/muscle3/muscle_manager.py b/muscle3/muscle_manager.py index 8463bc27..e7453f2e 100644 --- a/muscle3/muscle_manager.py +++ b/muscle3/muscle_manager.py @@ -143,7 +143,7 @@ def manage_simulation( print('Here are the final lines of the manager log:') print() print('-' * 80) - print(last_lines(log_file, 30), ' ') + print(last_lines(log_file, 50), ' ') print('-' * 80) print() print('You can find the full log at') @@ -151,6 +151,11 @@ def manage_simulation( print() else: print('Simulation completed successfully.') + try: + rel_run_dir = run_dir_path.relative_to(Path.cwd()) + print(f'Output may be found in {rel_run_dir}') + except ValueError: + print(f'Output may be found in {run_dir_path}') sys.exit(0 if success else 1) diff --git a/muscle3/profiling.py b/muscle3/profiling.py index 116bb465..b705a8c5 100644 --- a/muscle3/profiling.py +++ b/muscle3/profiling.py @@ -1,8 +1,11 @@ import sqlite3 from pathlib import Path +from typing import List, Optional, Tuple import numpy as np +from matplotlib.axes import Axes from matplotlib import pyplot as plt +from matplotlib.patches import Rectangle from libmuscle import ProfileDatabase @@ -33,8 +36,12 @@ def plot_instances(performance_file: Path) -> None: ax.bar(instances, wait, width, label='Wait', bottom=bottom) ax.set_title('Simulation component time breakdown') ax.set_xlabel('Instance') + ax.tick_params(axis='x', labelrotation = 45) + for label in ax.xaxis.get_ticklabels(): + label.set_horizontalalignment('right') ax.set_ylabel('Total time (s)') ax.legend(loc='upper right') + plt.subplots_adjust(bottom=0.30) def plot_resources(performance_file: Path) -> None: @@ -63,7 +70,7 @@ def plot_resources(performance_file: Path) -> None: seen_instances = set() for i, core in enumerate(sorted(stats.keys())): - bottom = 0 + bottom = 0.0 for instance, time in sorted(stats[core].items(), key=lambda x: -x[1]): if instance not in seen_instances: label: Optional[str] = instance @@ -72,7 +79,7 @@ def plot_resources(performance_file: Path) -> None: label = '_' ax.bar( - i, time, 0.8, + i, time, _BAR_WIDTH, label=label, bottom=bottom, color=palette[instance]) bottom += time @@ -81,18 +88,25 @@ def plot_resources(performance_file: Path) -> None: ax.set_title('Per-core time breakdown') ax.set_xlabel('Core') + ax.tick_params(axis='x', labelrotation = 45) + for tick_label in ax.xaxis.get_ticklabels(): + tick_label.set_horizontalalignment('right') ax.set_ylabel('Total time (s)') ax.legend(loc='upper right') + plt.subplots_adjust(bottom=0.30) _EVENT_TYPES = ( - 'REGISTER', 'CONNECT', 'DEREGISTER', - 'SEND', 'RECEIVE_WAIT', 'RECEIVE_TRANSFER', 'RECEIVE_DECODE') + 'REGISTER', 'CONNECT', 'SHUTDOWN_WAIT', 'DISCONNECT_WAIT', 'SHUTDOWN', + 'DEREGISTER', 'SEND', 'RECEIVE_WAIT', 'RECEIVE_TRANSFER', 'RECEIVE_DECODE') _EVENT_PALETTE = { 'REGISTER': '#910f33', 'CONNECT': '#c85172', + 'SHUTDOWN_WAIT': '#ffdddd', + 'DISCONNECT_WAIT': '#eedddd', + 'SHUTDOWN': '#c85172', 'DEREGISTER': '#910f33', 'RECEIVE_WAIT': '#cccccc', 'RECEIVE_TRANSFER': '#ff7d00', @@ -100,75 +114,227 @@ def plot_resources(performance_file: Path) -> None: 'SEND': '#0095bf'} -_MAX_EVENTS = 2000 +_MAX_EVENTS = 1000 -def plot_timeline(performance_file: Path) -> None: - with sqlite3.connect(performance_file) as conn: - cur = conn.cursor() +_CUTOFF_TEXT = ( + 'Warning: data was omitted from the plot in the\n crosshatched' + ' areas to improve performance.\n Please zoom or pan using the' + ' tools at 
the bottom\n of the window to see the missing events.' + ) + + +_BAR_WIDTH = 0.8 + + +class TimelinePlot: + """Manages an interactive timeline + + This implements on-demand loading of events as the user pans and + zooms. + """ + def __init__(self, performance_file: Path) -> None: + """Create a TimelinePlot + + This plots the dark gray background bars, and then plots the + rest on top on demand. - cur.execute("SELECT oid, name FROM instances ORDER BY oid") - instance_ids, instance_names = zip(*cur.fetchall()) + Args: + performance_file: The database to plot + """ + _, ax = plt.subplots() + self._ax = ax - cur.execute("SELECT MIN(start_time) FROM events") - min_time = cur.fetchall()[0][0] + # Y axis + self._cur = sqlite3.connect(performance_file).cursor() + self._cur.execute("SELECT oid, name FROM instances ORDER BY oid") + instance_ids, instance_names = zip(*self._cur.fetchall()) - cur.execute( - "SELECT instance, (start_time - ?)" + ax.set_yticks(instance_ids) + ax.set_yticklabels(instance_names) + + # Instances + self._cur.execute("SELECT MIN(start_time) FROM events") + self._min_db_time = self._cur.fetchall()[0][0] + + self._cur.execute( + "SELECT instance_oid, (start_time - ?) * 1e-9" " FROM events AS e" " JOIN event_types AS et ON (e.event_type_oid = et.oid)" - " WHERE et.name = 'REGISTER'", (min_time,)) - begin_times = dict(cur.fetchall()) + " WHERE et.name = 'REGISTER'", (self._min_db_time,)) + begin_times = dict(self._cur.fetchall()) - cur.execute( - "SELECT instance, (stop_time - ?)" + self._cur.execute( + "SELECT instance_oid, (stop_time - ?) * 1e-9" " FROM events AS e" " JOIN event_types AS et ON (e.event_type_oid = et.oid)" - " WHERE et.name = 'DEREGISTER'", (min_time,)) - end_times = dict(cur.fetchall()) - - fig, ax = plt.subplots() + " WHERE et.name = 'DEREGISTER'", (self._min_db_time,)) + end_times = dict(self._cur.fetchall()) instances = sorted(begin_times.keys()) - ax.barh( + self._instances = instances + + if not begin_times: + raise RuntimeError( + 'This database appears to be empty. Did the simulation crash' + ' before any data were generated?') + + # Rest of plot + ax.set_title('Execution timeline') + ax.set_xlabel('Wallclock time (s)') + + # Background + running_artist = ax.barh( instances, - [(end_times[i] - begin_times[i]) * 1e-9 for i in instances], - 0.8, - left=[begin_times[i] * 1e-9 for i in instances], + [end_times[i] - begin_times[i] for i in instances], + _BAR_WIDTH, + left=[begin_times[i] for i in instances], label='RUNNING', color='#444444' ) + # Initial events plot + xmin = min(begin_times.values()) + self._global_xmax = max(end_times.values()) + + first_cutoff = float('inf') + self._bars = dict() for event_type in _EVENT_TYPES: - cur.execute( - "SELECT" - " instance, (start_time - ?) * 1e-9," - " (stop_time - start_time) * 1e-9" - " FROM events AS e" - " JOIN event_types AS et ON (e.event_type_oid = et.oid)" - " WHERE et.name = ?" - " ORDER BY start_time ASC" - " LIMIT ?", - (min_time, event_type, _MAX_EVENTS)) - instances, start_times, durations = zip(*cur.fetchall()) - - if len(instances) == _MAX_EVENTS: - print( - 'Warning: event data truncated. 
Sorry, we cannot yet show' - ' this amount of data efficiently enough.') - ax.barh( - instances, durations, 0.8, + instances, start_times, durations, cutoff = self.get_data( + event_type, xmin, self._global_xmax) + + if not instances: + # Work around https://github.com/matplotlib/matplotlib/issues/21506 + instances = [''] + start_times = [float('NaN')] + durations = [float('NaN')] + + self._bars[event_type] = ax.barh( + instances, durations, _BAR_WIDTH, label=event_type, left=start_times, color=_EVENT_PALETTE[event_type]) + if cutoff: + first_cutoff = min(first_cutoff, cutoff) + + # Initial cut-off area + if first_cutoff != float('inf'): + self._bars['_CUTOFF'] = ax.barh( + self._instances, self._global_xmax - first_cutoff, _BAR_WIDTH, + label='Not shown', left=first_cutoff, + color='#FFFFFF', hatch='x') + self._cutoff_warning = ax.text( + 0.02, 0.02, _CUTOFF_TEXT, transform=ax.transAxes, fontsize=12, + verticalalignment='bottom', horizontalalignment='left', wrap=True, + bbox={ + 'facecolor': '#ffcccc', 'alpha': 0.75}) + + ax.set_autoscale_on(True) + ax.callbacks.connect('xlim_changed', self.update_data) + + ordered_artists = [self._bars[event_type] for event_type in _EVENT_TYPES] + ordered_names = list(_EVENT_TYPES) + + ordered_artists.insert(6, running_artist) + ordered_names.insert(6, 'RUNNING') + + ax.legend(ordered_artists, ordered_names, loc='upper right') + ax.figure.canvas.draw_idle() + + def close(self) -> None: + """Closes the database connection""" + self._cur.close() + + def get_data( + self, event_type: str, xmin: float, xmax: float + ) -> Tuple[List[int], List[float], List[float], Optional[float]]: + """Get events from the database + + Returns three lists with instance oid, start time and duration, and + the last timepoint returned in case we had too much data to show and + data got cut off, or None if all matching data was returned. + + Args: + event_type: Type of events to get + xmin: Time point after which the event must have stopped + xmax: Time point before which the event must have started + """ + self._cur.execute( + "SELECT" + " instance_oid, (start_time - ?) * 1e-9," + " (stop_time - start_time) * 1e-9" + " FROM events AS e" + " JOIN event_types AS et ON (e.event_type_oid = et.oid)" + " WHERE et.name = ?" + " AND (start_time - ?) * 1e-9 <= ?" + " AND ? <= (stop_time - ?) * 1e-9" + " ORDER BY start_time ASC" + " LIMIT ?", + ( + self._min_db_time, event_type, self._min_db_time, xmax, + xmin, self._min_db_time, _MAX_EVENTS)) + results = self._cur.fetchall() + if not results: + return list(), list(), list(), None + + if len(results) == _MAX_EVENTS: + return tuple(zip(*results)) + (results[-1][1],) # type: ignore + + return tuple(zip(*results)) + (None,) # type: ignore + + def update_data(self, ax: Axes) -> None: + """Update the plot after the axes have changed + + This is called after the user has panned or zoomed, and refreshes the + plot. 
+ + Args: + ax: The Axes object we are drawing in + """ + xmin, xmax = ax.viewLim.intervalx - ax.set_yticks(instance_ids) - ax.set_yticklabels(instance_names) + for event_type in _EVENT_TYPES: + instances, start_times, durations, cutoff = self.get_data( + event_type, xmin, xmax) + if instances: + # update existing rectangles + bars = self._bars[event_type].patches + n_cur = len(instances) + n_avail = len(bars) + + for i in range(min(n_cur, n_avail)): + bars[i].set_y(instances[i] - _BAR_WIDTH * 0.5) + bars[i].set_x(start_times[i]) + bars[i].set_width(durations[i]) + bars[i].set_visible(True) + + # set any superfluous ones invisible + for i in range(n_cur, n_avail): + bars[i].set_visible(False) + + # update cutoff bars, if any + if '_CUTOFF' in self._bars: + bars = self._bars['_CUTOFF'].patches + if cutoff: + for bar in bars: + bar.set_x(cutoff) + bar.set_width(self._global_xmax - cutoff) + bar.set_visible(True) + self._cutoff_warning.set_visible(True) + else: + for bar in bars: + bar.set_visible(False) + self._cutoff_warning.set_visible(False) + + +tplot = None # type: Optional[TimelinePlot] - ax.set_title('Execution timeline') - ax.set_xlabel('Wallclock time (s)') - ax.legend(loc='upper right') +def plot_timeline(performance_file: Path) -> None: + global tplot + tplot = TimelinePlot(performance_file) def show_plots() -> None: """Actually show the plots on screen""" - plt.show() + plt.show() # type: ignore + if tplot: + tplot.close() diff --git a/muscle3/py.typed b/muscle3/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/scripts/api_generator.py b/scripts/api_generator.py index dac141b9..3746ad96 100644 --- a/scripts/api_generator.py +++ b/scripts/api_generator.py @@ -755,7 +755,7 @@ def fc_convert_input(self) -> str: result = ( 'std::vector {0}_shape_v(\n' ' {0}_shape, {0}_shape + {0}_ndims);\n' - 'auto {0}_p = const_cast<{1} const * const>({0});\n' + 'auto {0}_p = const_cast<{1} const *>({0});\n' ).format( self.name, self.elem_type.fc_cpp_type()) diff --git a/scripts/convert_fortran_source.py b/scripts/convert_fortran_source.py index dd937b49..8e021b93 100644 --- a/scripts/convert_fortran_source.py +++ b/scripts/convert_fortran_source.py @@ -5,7 +5,7 @@ import click -@click.command(no_args_is_help=True) +@click.command(no_args_is_help=True) # type: ignore @click.argument("fortran_files", nargs=-1, required=True, type=click.Path( exists=True, file_okay=True, dir_okay=False, readable=True, allow_dash=True, resolve_path=True, path_type=pathlib.Path)) @@ -42,4 +42,4 @@ def convert(fortran_files: List[pathlib.Path]) -> None: if __name__ == "__main__": - convert() + convert() # type: ignore diff --git a/scripts/make_libmuscle_api.py b/scripts/make_libmuscle_api.py index 9cf47d34..e49ead74 100755 --- a/scripts/make_libmuscle_api.py +++ b/scripts/make_libmuscle_api.py @@ -81,7 +81,7 @@ def __init__(self, with_names: bool) -> None: ) {{ std::vector data_array_shape_v( data_array_shape, data_array_shape + data_array_ndims); - auto data_array_p = const_cast<{1} const * const>(data_array); + auto data_array_p = const_cast<{1} const *>(data_array); std::vector names_v; names_v.emplace_back(index_name_1, index_name_1_size); diff --git a/setup.cfg b/setup.cfg index 547e9306..5b1cf6aa 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,7 +4,7 @@ addopts = --cov --cov-report xml --cov-report term-missing -s # -vv --log-cli-level=DEBUG [mypy] -files = libmuscle/python/**/*.py, scripts/*.py +files = libmuscle/python/**/*.py, scripts/*.py, muscle3/*.py mypy_path = libmuscle/python 
warn_unused_configs = True disallow_subclassing_any = True @@ -31,6 +31,9 @@ ignore_missing_imports = True [mypy-pytest] ignore_missing_imports = True +[mypy-matplotlib.*] +ignore_missing_imports = True + [mypy-msgpack.*] ignore_missing_imports = True diff --git a/setup.py b/setup.py index c64ec76f..a997e6ff 100644 --- a/setup.py +++ b/setup.py @@ -36,13 +36,15 @@ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10'], + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11'], packages=_muscle3_packages, package_dir={ 'muscle3': 'muscle3', 'libmuscle': 'libmuscle/python/libmuscle' }, + include_package_data=True, entry_points={ 'console_scripts': [ 'muscle_manager=muscle3.muscle_manager:manage_simulation', diff --git a/tox.ini b/tox.ini index e7310a7a..9b28adea 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py37, py38, py39, py310 +envlist = py37, py38, py39, py310, py311 skip_missing_interpreters = true [testenv] @@ -27,6 +27,7 @@ python = 3.8: py38 3.9: py39 3.10: py310 + 3.11: py311 [pycodestyle] max-doc-length = 88
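
A note on the MMPClient changes above: the profiler now submits events from a background thread while the main thread keeps using the same manager connection, so all manager calls are funnelled through one internal lock. The sketch below illustrates only that pattern; SharedClient and _do_transport_call are made-up names for this example, not part of libmuscle.

    from threading import Lock

    class SharedClient:
        """Toy stand-in for a client whose transport is not thread-safe."""

        def __init__(self) -> None:
            self._mutex = Lock()

        def call(self, request: bytes) -> bytes:
            # Serialise access: the profiler thread and the main thread may
            # both call this, but only one at a time touches the transport.
            with self._mutex:
                return self._do_transport_call(request)

        def _do_transport_call(self, request: bytes) -> bytes:
            # Placeholder for the real encode/send/receive/decode round-trip.
            return request

Holding the lock for the whole round-trip keeps request/response pairs from interleaving on the connection, at the cost of serialising manager traffic.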
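
The new SHUTDOWN_WAIT, DISCONNECT_WAIT and SHUTDOWN event types can be inspected through ProfileDatabase, using the etype and instance arguments documented in the time_taken() docstring above. The snippet below is a usage sketch under some assumptions: the database path and the instance name are hypothetical, and opening/closing details may differ from the real API.

    from libmuscle import ProfileDatabase

    # Hypothetical location of the profiling database written by the manager.
    db = ProfileDatabase('run_dir/performance.sqlite')

    # Time a (hypothetical) 'macro' instance spent in the new shutdown phases;
    # the etype values follow the updated time_taken() documentation.
    for etype in ('SHUTDOWN_WAIT', 'DISCONNECT_WAIT', 'SHUTDOWN'):
        print(etype, db.time_taken(etype=etype, instance='macro'))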
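
TimelinePlot reloads events on demand by listening for axis-limit changes instead of drawing everything up front. The minimal sketch below shows only the matplotlib mechanism it relies on (callbacks.connect with the 'xlim_changed' signal, and viewLim to read the visible range); the data and the callback body are illustrative, not the real timeline code.

    import numpy as np
    from matplotlib import pyplot as plt
    from matplotlib.axes import Axes

    def on_xlim_changed(ax: Axes) -> None:
        # Fired whenever the user pans or zooms horizontally; a real
        # implementation would re-query the database for this range.
        xmin, xmax = ax.viewLim.intervalx
        print(f'visible time range: {xmin:.2f} .. {xmax:.2f} s')

    x = np.linspace(0.0, 10.0, 200)
    fig, ax = plt.subplots()
    ax.plot(x, np.sin(x))
    ax.callbacks.connect('xlim_changed', on_xlim_changed)
    plt.show()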
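
The profile_store change adds an index on events(start_time), which supports the time-windowed, LIMIT-ed queries that TimelinePlot.get_data() issues. The sketch below demonstrates the general idea with the standard sqlite3 module and a throwaway in-memory table; the table and column names are made up for the example and are not the actual MUSCLE3 schema.

    import sqlite3

    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute('CREATE TABLE ev (instance TEXT, start_time INTEGER, stop_time INTEGER)')
    cur.executemany(
            'INSERT INTO ev VALUES (?, ?, ?)',
            [('macro', i * 10, i * 10 + 5) for i in range(1000)])

    # Index on the column used to window and order the query.
    cur.execute('CREATE INDEX ev_start_time_idx ON ev(start_time)')

    # Fetch at most 100 events overlapping the window [2000, 3000).
    cur.execute(
            'SELECT instance, start_time, stop_time FROM ev'
            ' WHERE start_time < ? AND ? <= stop_time'
            ' ORDER BY start_time ASC LIMIT 100',
            (3000, 2000))
    print(len(cur.fetchall()))
    conn.close()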