diff --git a/.github/test_and_build.yml b/.github/test_and_build.yml new file mode 100644 index 000000000..e2dcd08be --- /dev/null +++ b/.github/test_and_build.yml @@ -0,0 +1,20 @@ +channels: + - conda-forge + - bioconda +dependencies: + - python >= 3.7 + - numpy + - scipy + - flake8 + - pysam + - deeptoolsintervals + - pytest + - samtools + - py2bit + - pyBigWig + - twine + - pip + - tomli # remove dependency when lowest supported version is py 3.11 + - pip: + - build + - planemo \ No newline at end of file diff --git a/.github/workflows/planemo.yml b/.github/workflows/planemo.yml index 44baae708..bc7e4b0bb 100644 --- a/.github/workflows/planemo.yml +++ b/.github/workflows/planemo.yml @@ -1,7 +1,17 @@ name: Planemo on: [push, pull_request] + env: - GALAXY_BRANCH: release_22.05 + GALAXY_BRANCH: release_23.1 + +defaults: + run: + shell: bash -l {0} + +# setup micromamba doesn't work as galaxy setup by planemo requires conda. +# installing conda over into micromamba built env screws up the PATH +# setup-miniconda + changing over to libmamba to solve is the easiest workaround + jobs: planemo_test: name: Planemo test @@ -10,12 +20,24 @@ jobs: matrix: chunk: [1, 2, 3] steps: - - uses: actions/checkout@v1 - - uses: "dpryan79/github-actions/@master" + - uses: actions/checkout@v3 + - uses: conda-incubator/setup-miniconda@v2 + with: + miniconda-version: "latest" + auto-activate-base: true + - name: setup env + run: | + conda env list + conda install -n base conda-libmamba-solver + conda config --set solver libmamba + conda env create -f .github/test_and_build.yml -n test_and_build + - name: pip install + run: | + conda activate test_and_build + pip install . - name: planemo run: | - source activate foo - conda update -c conda-forge -c bioconda samtools + conda activate test_and_build ./.planemo.sh ${{ matrix.chunk }} ${{ env.GALAXY_BRANCH }} - uses: actions/upload-artifact@v3 with: @@ -27,7 +49,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.7'] + python-version: ['3.7', '3.11'] steps: - uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml index e30e57712..e97cfbdb2 100644 --- a/.github/workflows/pypi.yml +++ b/.github/workflows/pypi.yml @@ -1,35 +1,33 @@ name: pypi + on: create: tags: + +defaults: + run: + shell: bash -l {0} + jobs: pypi: name: upload to pypi runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 - - name: Setup conda + - uses: actions/checkout@v3 + - uses: mamba-org/setup-micromamba@main + with: + environment-file: .github/test_and_build.yml + cache-downloads: true + environment-name: test_and_build + - name: build run: | - curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -o miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no - - name: create env - run: | - export PATH=$HOME/miniconda/bin:$PATH - conda create -n foo -q --yes -c conda-forge -c bioconda python=3.7 twine - - name: sdist - run: | - export PATH=$HOME/miniconda/bin:$PATH - source activate foo + micromamba activate test_and_build rm -f dist/* - python setup.py sdist + python -m build - name: upload env: TWINE_USERNAME: "__token__" TWINE_PASSWORD: ${{ secrets.pypi_password }} run: | - export PATH=$HOME/miniconda/bin:$PATH - source activate foo + micromamba activate test_and_build twine upload dist/* diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d35d5517d..8a4789073 100644 
--- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,16 +1,21 @@ name: Test on: [push, pull_request] + +defaults: + run: + shell: bash -l {0} + jobs: check_versions_matches: name: Check deeptools version matches galaxy tools runs-on: ubuntu-latest if: github.base_ref == 'master' steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Check path - run: find /home/runner/work/deepTools/deepTools -name "_version.py" + run: find /home/runner/work/deepTools/deepTools -name "pyproject.toml" - name: Get Version of Deeptools - run: echo "deeptools_version=$(grep "__version__" /home/runner/work/deepTools/deepTools/deeptools/_version.py | awk '{print substr($NF, 2, length($NF) - 2)}')" >> $GITHUB_ENV + run: echo "deeptools_version=$(grep "version" /home/runner/work/deepTools/deepTools/pyproject.toml | awk '{print substr($NF, 2, length($NF) - 2)}')" >> $GITHUB_ENV - name: Get Version of Galaxy tools run: echo "galaxy_deeptools_version=$(grep "token.*TOOL_VERSION" /home/runner/work/deepTools/deepTools/galaxy/wrapper/deepTools_macros.xml | awk -F '>|<' '{print $3}')" >> $GITHUB_ENV - name: Versions @@ -35,32 +40,73 @@ jobs: name: Test on Linux runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 - - uses: "dpryan79/github-actions/@master" + - uses: actions/checkout@v3 + - uses: mamba-org/setup-micromamba@main + with: + environment-file: .github/test_and_build.yml + cache-downloads: true + environment-name: test_and_build + - name: pip install + run: | + micromamba activate test_and_build + pip install . - name: PEP8 run: | - source activate foo + micromamba activate test_and_build flake8 . --exclude=.venv,.build,build --ignore=E501,F403,E402,F999,F405,E722,W504,W605 - name: Test deepTools run: | - source activate foo - nosetests --with-doctest -sv deeptools + micromamba activate test_and_build + pytest -v - name: make an artifact run: | - source activate foo + micromamba activate test_and_build rm -f dist/* - python setup.py sdist + python -m build - uses: actions/upload-artifact@master with: name: "Dist files" path: "dist" + test-wheels: + name: test wheel + runs-on: ubuntu-latest + needs: build-linux + strategy: + matrix: + python-version: ['3.7','3.8','3.9','3.10', '3.11'] + steps: + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 + with: + name: "Dist files" + path: ~/dist/ + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + - name: install wheel + run: | + pip install ~/dist/*whl + deeptools -h + - name: pytest + run: | + pip install pytest + pytest -v build-osx: name: Test on OSX runs-on: macOS-latest steps: - - uses: actions/checkout@v1 - - uses: "dpryan79/github-actions/@master" + - uses: actions/checkout@v3 + - uses: mamba-org/setup-micromamba@main + with: + environment-file: .github/test_and_build.yml + cache-downloads: true + environment-name: test_and_build + - name: pip install + run: | + micromamba activate test_and_build + pip install . 
- name: Test deepTools run: | - source activate foo - nosetests --with-doctest -sv deeptools + micromamba activate test_and_build + pytest -v diff --git a/.planemo.sh b/.planemo.sh index 1dfc67222..0a10b8830 100755 --- a/.planemo.sh +++ b/.planemo.sh @@ -28,6 +28,7 @@ else galaxy/wrapper/plotProfiler.xml" fi +planemo --version planemo lint ${wrappers} planemo test --no_dependency_resolution --galaxy_branch $2 --install_galaxy ${wrappers} 2>&1 mkdir upload diff --git a/CHANGES.txt b/CHANGES.txt index d17ab90f2..335dbc80b 100755 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,13 @@ +3.5.3 +* requirement cap for matplotlib lifted (changes in plotting can occur) +* nose has been deprecated in favor of pytest +* tests run with python 3.7 - 3.11 +* pyproject.toml now handles installation, requirements, versioning and executables +* planemo tests updated to galaxy 23.1 +* custom github action runner deprecated +* numpy types deprecated in favor of builtin types +* stricter label checks and validator in galaxy + 3.5.2 * new subcommand: Bigwig average #1169 * dendogram of plotCorrelation now matches each cell correctly diff --git a/bin/alignmentSieve b/bin/alignmentSieve deleted file mode 100755 index 6d35603ce..000000000 --- a/bin/alignmentSieve +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.alignmentSieve import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/bamCompare b/bin/bamCompare deleted file mode 100755 index 18cc2c12c..000000000 --- a/bin/bamCompare +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.bamCompare import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/bamCoverage b/bin/bamCoverage deleted file mode 100755 index 75fde9899..000000000 --- a/bin/bamCoverage +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.bamCoverage import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/bamPEFragmentSize b/bin/bamPEFragmentSize deleted file mode 100755 index f38d51fa6..000000000 --- a/bin/bamPEFragmentSize +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.bamPEFragmentSize import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/bigwigAverage b/bin/bigwigAverage deleted file mode 100755 index 3c01a85ef..000000000 --- a/bin/bigwigAverage +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.bigwigAverage import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/bigwigCompare b/bin/bigwigCompare deleted file mode 100755 index 61d0ce112..000000000 --- a/bin/bigwigCompare +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.bigwigCompare import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/computeGCBias b/bin/computeGCBias deleted file mode 100755 index bd1726bd0..000000000 --- a/bin/computeGCBias
+++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.computeGCBias import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/computeMatrix b/bin/computeMatrix deleted file mode 100755 index 0e4ed9096..000000000 --- a/bin/computeMatrix +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.computeMatrix import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - elif len(sys.argv) == 2 and sys.argv[1] != '--version': - sys.argv.append("--help") - main(args) diff --git a/bin/computeMatrixOperations b/bin/computeMatrixOperations deleted file mode 100755 index 4e72fbea2..000000000 --- a/bin/computeMatrixOperations +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.computeMatrixOperations import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/correctGCBias b/bin/correctGCBias deleted file mode 100755 index 2906632c8..000000000 --- a/bin/correctGCBias +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.correctGCBias import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/deeptools b/bin/deeptools deleted file mode 100755 index f5cd8a02e..000000000 --- a/bin/deeptools +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.deeptools_list_tools import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/estimateReadFiltering b/bin/estimateReadFiltering deleted file mode 100755 index 4948d4897..000000000 --- a/bin/estimateReadFiltering +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.estimateReadFiltering import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/multiBamSummary b/bin/multiBamSummary deleted file mode 100755 index 1dc376ea4..000000000 --- a/bin/multiBamSummary +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.multiBamSummary import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - if len(sys.argv) == 2 and sys.argv[1] != "--version": - sys.argv.append("--help") - main(args) diff --git a/bin/multiBigwigSummary b/bin/multiBigwigSummary deleted file mode 100755 index 37108ed93..000000000 --- a/bin/multiBigwigSummary +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.multiBigwigSummary import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - if len(sys.argv) == 2 and sys.argv[1] != "--version": - sys.argv.append("--help") - main(args) diff --git a/bin/plotCorrelation b/bin/plotCorrelation deleted file mode 100755 index 560c15b7e..000000000 --- a/bin/plotCorrelation +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from 
deeptools.plotCorrelation import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/plotCoverage b/bin/plotCoverage deleted file mode 100755 index 6957ff57e..000000000 --- a/bin/plotCoverage +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.plotCoverage import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/plotEnrichment b/bin/plotEnrichment deleted file mode 100755 index c1debcb76..000000000 --- a/bin/plotEnrichment +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.plotEnrichment import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/plotFingerprint b/bin/plotFingerprint deleted file mode 100755 index c170dc005..000000000 --- a/bin/plotFingerprint +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.plotFingerprint import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/plotHeatmap b/bin/plotHeatmap deleted file mode 100755 index cb38dff76..000000000 --- a/bin/plotHeatmap +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.plotHeatmap import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/plotPCA b/bin/plotPCA deleted file mode 100755 index 636dbcbb8..000000000 --- a/bin/plotPCA +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.plotPCA import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/bin/plotProfile b/bin/plotProfile deleted file mode 100755 index b39f5261e..000000000 --- a/bin/plotProfile +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -#-*- coding: utf-8 -*- - -import deeptools.misc -from deeptools.plotProfile import main -import sys - -if __name__ == "__main__": - args = None - if len(sys.argv) == 1: - args = ["--help"] - main(args) diff --git a/deeptools/_version.py b/deeptools/_version.py deleted file mode 100755 index cb8316dc0..000000000 --- a/deeptools/_version.py +++ /dev/null @@ -1,5 +0,0 @@ - -# This file is originally generated from Git information by running 'setup.py -# version'. Distribution tarballs contain a pre-generated copy of this file. - -__version__ = '3.5.2' diff --git a/deeptools/alignmentSieve.py b/deeptools/alignmentSieve.py index 5e28b2ceb..4f2aa1879 100644 --- a/deeptools/alignmentSieve.py +++ b/deeptools/alignmentSieve.py @@ -7,7 +7,10 @@ from deeptools import parserCommon from deeptools.bamHandler import openBam from deeptools.mapReduce import mapReduce -from deeptools._version import __version__ +try: # keep python 3.7 support. 
+ from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version from deeptools.utilities import getTLen, smartLabels, getTempFileName @@ -15,7 +18,8 @@ def parseArguments(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="This tool filters alignments in a BAM/CRAM file according the the specified parameters. It can optionally output to BEDPE format.", - usage='Example usage: alignmentSieve.py -b sample1.bam -o sample1.filtered.bam --minMappingQuality 10 --filterMetrics log.txt') + usage='alignmentSieve -b sample1.bam -o sample1.filtered.bam --minMappingQuality 10 --filterMetrics log.txt\n' + 'help: alignmentSieve -h / alignmentSieve --help') required = parser.add_argument_group('Required arguments') required.add_argument('--bam', '-b', @@ -60,7 +64,7 @@ def parseArguments(): action='store_true') general.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) general.add_argument('--shift', nargs='+', diff --git a/deeptools/bamCompare.py b/deeptools/bamCompare.py index 9f19321f9..223bc06c9 100644 --- a/deeptools/bamCompare.py +++ b/deeptools/bamCompare.py @@ -44,7 +44,8 @@ def parseArguments(): 'independently. If this is undesirable, then use the --samFlagInclude ' 'or --samFlagExclude options.', - usage=' bamCompare -b1 treatment.bam -b2 control.bam -o log2ratio.bw', + usage='bamCompare -b1 treatment.bam -b2 control.bam -o log2ratio.bw\n' + 'help: bamCompare -h / bamCompare --help', add_help=False) diff --git a/deeptools/bamCoverage.py b/deeptools/bamCoverage.py index c0002a59d..acca196fc 100644 --- a/deeptools/bamCoverage.py +++ b/deeptools/bamCoverage.py @@ -36,8 +36,8 @@ def parseArguments(): 'Million mapped reads (RPKM), counts per million (CPM), bins per ' 'million mapped reads (BPM) and 1x depth (reads per genome ' 'coverage, RPGC).\n', - usage='An example usage is:' - '$ bamCoverage -b reads.bam -o coverage.bw', + usage='bamCoverage -b reads.bam -o coverage.bw\n' + 'help: bamCoverage -h / bamCoverage --help', add_help=False) return parser diff --git a/deeptools/bamPEFragmentSize.py b/deeptools/bamPEFragmentSize.py index 646b51cae..ad63fa14f 100755 --- a/deeptools/bamPEFragmentSize.py +++ b/deeptools/bamPEFragmentSize.py @@ -18,7 +18,10 @@ # own tools from deeptools.parserCommon import writableFile from deeptools.getFragmentAndReadSize import get_read_and_fragment_length -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version def parse_arguments(): @@ -30,7 +33,10 @@ def parse_arguments(): 'Properly paired reads are preferred for computation, i.e., ' 'it will only use discordant pairs if no concordant alignments ' 'overlap with a given region. 
' - 'The default setting simply prints the summary statistics to the screen.') + 'The default setting simply prints the summary statistics to the screen.', + usage='bamPEFragmentSize -b sample1.bam sample2.bam -o hist.png\n' + 'help: bamPEFragmentSize -h / bamPEFragmentSize --help' + ) parser.add_argument('--bamfiles', '-b', help='List of BAM files to process', nargs='+', @@ -109,7 +115,7 @@ def parse_arguments(): action='store_true', required=False) parser.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) return parser @@ -290,6 +296,10 @@ def printTable(args, fragDict, readDict): def main(args=None): args = parse_arguments().parse_args(args) + if len(sys.argv) == 1: + parse_arguments().print_help() + sys.exit() + fraglengths = {} readlengths = {} of = None diff --git a/deeptools/bigwigAverage.py b/deeptools/bigwigAverage.py index 5cc41553b..7153d98f4 100644 --- a/deeptools/bigwigAverage.py +++ b/deeptools/bigwigAverage.py @@ -23,7 +23,9 @@ def parse_arguments(args=None): 'of mapped reads. To average the bigWig files, the genome is ' 'partitioned into bins of equal size, then the scores ' 'in each bigwig file are computed per bin.' - 'These scores are averaged and scaleFactors can be applied before the average.') + 'These scores are averaged and scaleFactors can be applied before the average.', + usage='bigwigAverage -b sample1.bw sample2.bw -o outfile.bw\n' + 'help: bigwigAverage -h / bigwigAverage --help') # define the arguments parser.add_argument('--bigwigs', '-b', @@ -94,6 +96,9 @@ def average(tileCoverage, args): def main(args=None): args = parse_arguments().parse_args(args) + if len(sys.argv) == 1: + parse_arguments().print_help() + sys.exit() nFiles = len(args.bigwigs) diff --git a/deeptools/bigwigCompare.py b/deeptools/bigwigCompare.py index dc1a70e08..4e15c7df8 100644 --- a/deeptools/bigwigCompare.py +++ b/deeptools/bigwigCompare.py @@ -24,7 +24,9 @@ def parse_arguments(args=None): 'partitioned into bins of equal size, then the number of reads found ' 'in each BAM file are counted per bin and finally a summary ' 'value is reported. This value can be the ratio of the number of reads' - 'per bin, the log2 of the ratio, the sum or the difference.') + 'per bin, the log2 of the ratio, the sum or the difference.', + usage='bigwigCompare -b1 sample1.bw -b2 sample2.bw -o log2.bw\n' + 'help: bigwigCompare -h / bigwigCompare --help') # define the arguments parser.add_argument('--bigwig1', '-b1', diff --git a/deeptools/cm.py b/deeptools/cm.py index fcb7c20ff..47bcf1628 100644 --- a/deeptools/cm.py +++ b/deeptools/cm.py @@ -30,7 +30,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from matplotlib import colors, cm as mpl_cm +from matplotlib import colors, colormaps as mpl_cm _rocket_lut = [ @@ -1084,5 +1084,5 @@ _cmap_r = colors.ListedColormap(_lut[::-1], _name + "_r") locals()[_name + "_r"] = _cmap_r - mpl_cm.register_cmap(_name, _cmap) - mpl_cm.register_cmap(_name + "_r", _cmap_r) + mpl_cm.register(_cmap, name=_name) + mpl_cm.register(_cmap_r, name=_name + "_r") diff --git a/deeptools/computeGCBias.py b/deeptools/computeGCBias.py index 0c51f906b..f261a9fc1 100755 --- a/deeptools/computeGCBias.py +++ b/deeptools/computeGCBias.py @@ -30,8 +30,9 @@ def parse_arguments(args=None): '[Benjamini & Speed (2012). Nucleic Acids Research, 40(10). doi: 10.1093/nar/gks001]. 
' 'The GC-bias is visualized and the resulting table can be used to' 'correct the bias with `correctGCBias`.', - usage='\n computeGCBias ' - '-b file.bam --effectiveGenomeSize 2150570000 -g mm9.2bit -l 200 --GCbiasFrequenciesFile freq.txt [options]', + usage='computeGCBias ' + '-b file.bam --effectiveGenomeSize 2150570000 -g mm9.2bit -l 200 --GCbiasFrequenciesFile freq.txt\n' + 'help: computeGCBias -h / computeGCBias --help', conflict_handler='resolve', add_help=False) diff --git a/deeptools/computeMatrix.py b/deeptools/computeMatrix.py index 99c128324..440358c9b 100644 --- a/deeptools/computeMatrix.py +++ b/deeptools/computeMatrix.py @@ -7,7 +7,10 @@ import multiprocessing from deeptools.parserCommon import writableFile, numberOfProcessors -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version from deeptools import parserCommon from deeptools import heatmapper import deeptools.computeMatrixOperations as cmo @@ -37,7 +40,7 @@ def parse_arguments(args=None): ' -R -b 1000\n \n') parser.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) subparsers = parser.add_subparsers( title='Commands', @@ -137,7 +140,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]): parser = argparse.ArgumentParser(add_help=False) optional = parser.add_argument_group('Optional arguments') optional.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) if case == 'scale-regions': optional.add_argument('--regionBodyLength', '-m', @@ -353,6 +356,10 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]): def process_args(args=None): args = parse_arguments().parse_args(args) + if len(sys.argv) == 1: + parse_arguments().print_help() + sys.exit() + if args.quiet is True: args.verbose = False diff --git a/deeptools/computeMatrixOperations.py b/deeptools/computeMatrixOperations.py index deb62076c..b246b9ce0 100755 --- a/deeptools/computeMatrixOperations.py +++ b/deeptools/computeMatrixOperations.py @@ -6,7 +6,10 @@ import sys import os import csv -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version def parse_arguments(): @@ -138,7 +141,7 @@ def parse_arguments(): usage='Example usage:\n computeMatrixOperations dataRange -m input.mat.gz\n\n') parser.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) return parser @@ -786,10 +789,13 @@ def sortMatrix(hm, regionsFileName, transcriptID, transcript_id_designator, verb def main(args=None): - if len(sys.argv) == 1: - args = ["-h"] - if len(sys.argv) == 2: - args = [sys.argv[1], "-h"] + # if args none is need since otherwise pytest passes 'pytest' as sys.argv + if args is None: + if len(sys.argv) == 1: + args = ["-h"] + if len(sys.argv) == 2: + args = [sys.argv[1], "-h"] + args = parse_arguments().parse_args(args) hm = heatmapper.heatmapper() diff --git a/deeptools/correctGCBias.py b/deeptools/correctGCBias.py index ba6d893a9..1154b9368 100755 --- a/deeptools/correctGCBias.py +++ b/deeptools/correctGCBias.py @@ -38,10 +38,10 @@ def parse_arguments(args=None): '(typically AT-rich regions). 
' 'The tool ``computeGCBias`` needs to be run first to generate the ' 'frequency table needed here.', - usage='An example usage is:\n correctGCBias ' + usage='correctGCBias ' '-b file.bam --effectiveGenomeSize 2150570000 -g mm9.2bit ' - '--GCbiasFrequenciesFile freq.txt -o gc_corrected.bam ' - '[options]', + '--GCbiasFrequenciesFile freq.txt -o gc_corrected.bam\n' + 'help: correctGCBias -h / correctGCBias --help', conflict_handler='resolve', add_help=False) return parser diff --git a/deeptools/deeptools_list_tools.py b/deeptools/deeptools_list_tools.py index 15b0e6a8d..0e4b6a387 100644 --- a/deeptools/deeptools_list_tools.py +++ b/deeptools/deeptools_list_tools.py @@ -3,7 +3,10 @@ import argparse import sys -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version def parse_arguments(args=None): @@ -61,7 +64,7 @@ def parse_arguments(args=None): """) parser.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) return parser diff --git a/deeptools/estimateReadFiltering.py b/deeptools/estimateReadFiltering.py index 464fe0999..52fded538 100644 --- a/deeptools/estimateReadFiltering.py +++ b/deeptools/estimateReadFiltering.py @@ -5,7 +5,10 @@ from deeptools import parserCommon, bamHandler, utilities from deeptools.mapReduce import mapReduce from deeptools.utilities import smartLabels -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version def parseArguments(): @@ -31,7 +34,9 @@ def parseArguments(): The sum of these may be more than the total number of reads. Note that alignments are sampled from bins of size --binSize spaced --distanceBetweenBins apart. """, - usage='Example usage: estimateReadFiltering.py -b sample1.bam sample2.bam > log.txt') + usage='estimateReadFiltering -b sample1.bam sample2.bam\n' + 'help: estimateReadFiltering -h / estimateReadFiltering --help' + ) required = parser.add_argument_group('Required arguments') required.add_argument('--bamfiles', '-b', @@ -92,7 +97,7 @@ def parseArguments(): action='store_true') general.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) filtering = parser.add_argument_group('Optional arguments') diff --git a/bin/estimateScaleFactor b/deeptools/estimateScaleFactor.py old mode 100755 new mode 100644 similarity index 76% rename from bin/estimateScaleFactor rename to deeptools/estimateScaleFactor.py index 58b6e4ed3..31acea3f5 --- a/bin/estimateScaleFactor +++ b/deeptools/estimateScaleFactor.py @@ -1,22 +1,27 @@ #!/usr/bin/env python -#-*- coding: utf-8 -*- +# -*- coding: utf-8 -*- -import deeptools.misc import argparse import sys from deeptools.SES_scaleFactor import estimateScaleFactor from deeptools.parserCommon import numberOfProcessors -from deeptools._version import __version__ +try: # keep python 3.7 support. 
+ from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version debug = 0 def parseArguments(args=None): parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description='Given two BAM files, this estimates scaling factors ' - '(bigger to smaller).') + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description='Given two BAM files, this estimates scaling factors ' + '(bigger to smaller).', + usage='estimateScaleFactor -b sample1.bam sample2.bam\n' + 'help: estimateScaleFactor -h / estimateScaleFactor --help' + ) # define the arguments parser.add_argument('--bamfiles', '-b', @@ -25,7 +30,6 @@ def parseArguments(args=None): nargs='+', required=True) - parser.add_argument('--ignoreForNormalization', '-ignore', help='A comma-separated list of chromosome names, ' 'limited by quotes, ' @@ -74,28 +78,33 @@ def parseArguments(args=None): required=False) parser.add_argument('--verbose', '-v', - help='Set to see processing messages.', - action='store_true') + help='Set to see processing messages.', + action='store_true') - parser.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + parser.add_argument('--version', + action='version', + version='%(prog)s {}'.format(version('deeptools'))) - args=parser.parse_args(args) + args = parser.parse_args(args) if args.ignoreForNormalization: - args.ignoreForNormalization=[x.strip() for x in args.ignoreForNormalization.split(',')] + args.ignoreForNormalization = [ + x.strip() for x in args.ignoreForNormalization.split(',') + ] else: - args.ignoreForNormalization = [] + args.ignoreForNormalization = [] return args -def main(args): + +def main(args=None): """ The algorithm samples the genome a number of times as specified by the --numberOfSamples parameter to estimate scaling factors of - betweeen to samples + between two samples """ + args = parseArguments().parse_args(args) if len(args.bamfiles) > 2: - print("SES method to stimate scale factors only works for two samples") + print("SES method to estimate scale factors only works for two samples") exit(0) sys.stderr.write("{:,} number of samples will be computed.\n".format(args.numberOfSamples)) @@ -107,9 +116,4 @@ def main(args): verbose=args.verbose) for k, v in sizeFactorsDict.items(): - print("{}: {}".format(k, v)) - - -if __name__ == "__main__": - args = parseArguments() - main(args) + print("{}: {}".format(k, v)) diff --git a/deeptools/heatmapper.py b/deeptools/heatmapper.py index 1a2e5f03c..f67afaf4d 100644 --- a/deeptools/heatmapper.py +++ b/deeptools/heatmapper.py @@ -777,7 +777,7 @@ def read_matrix_file(self, matrix_file): # split the line into bed interval and matrix values region = line.split('\t') chrom, start, end, name, score, strand = region[0:6] - matrix_row = np.ma.masked_invalid(np.fromiter(region[6:], np.float64)) + matrix_row = np.ma.masked_invalid(np.fromiter(region[6:], float)) matrix_rows.append(matrix_row) starts = start.split(",") ends = end.split(",") @@ -852,7 +852,7 @@ def save_matrix(self, file_name): # join np_array values # keeping nans while converting them to strings if not np.ma.is_masked(score_list[idx]): - np.float64(score_list[idx]) + float(score_list[idx]) matrix_values = "\t".join( np.char.mod('%f', self.matrix.matrix[idx, :])) starts = ["{0}".format(x[0]) for x in region[1]] @@ -1253,10 +1253,10 @@ def hmcluster(self, k, evaluate_silhouette=True, method='kmeans', clustering_sam matrix = np.asarray(self.matrix) matrix_to_cluster = matrix
if clustering_samples is not None: - assert all(i > 0 for i in clustering_samples),\ + assert all(i > 0 for i in clustering_samples), \ "all indices should be bigger than or equal to 1." assert all(i <= len(self.sample_labels) for i in - clustering_samples),\ + clustering_samples), \ "each index should be smaller than or equal to {}(total "\ "number of samples.)".format(len(self.sample_labels)) @@ -1345,7 +1345,7 @@ def removeempty(self): to_keep = [] score_list = np.ma.masked_invalid(np.mean(self.matrix, axis=1)) for idx, region in enumerate(self.regions): - if np.ma.is_masked(score_list[idx]) or np.float64(score_list[idx]) == 0: + if np.ma.is_masked(score_list[idx]) or float(score_list[idx]) == 0: continue else: to_keep.append(idx) diff --git a/deeptools/multiBamSummary.py b/deeptools/multiBamSummary.py index a588c09c9..b010001ff 100644 --- a/deeptools/multiBamSummary.py +++ b/deeptools/multiBamSummary.py @@ -9,7 +9,10 @@ import deeptools.countReadsPerBin as countR from deeptools import parserCommon from deeptools.utilities import smartLabels -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version old_settings = np.seterr(all='ignore') @@ -44,7 +47,7 @@ def parse_arguments(args=None): conflict_handler='resolve') parser.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) subparsers = parser.add_subparsers( title="commands", dest='command', @@ -70,7 +73,8 @@ def parse_arguments(args=None): add_help=False, usage='%(prog)s ' '--bamfiles file1.bam file2.bam ' - '-o results.npz \n') + '-o results.npz \n' + 'help: multiBamSummary bins -h / multiBamSummary bins --help\n') # BED file arguments subparsers.add_parser( @@ -84,7 +88,8 @@ def parse_arguments(args=None): "that should be considered for the coverage analysis. A " "common use is to compare ChIP-seq coverages between two " "different samples for a set of peak regions.", - usage='%(prog)s --BED selection.bed --bamfiles file1.bam file2.bam -o results.npz\n', + usage='%(prog)s --BED selection.bed --bamfiles file1.bam file2.bam -o results.npz\n' + 'help: multiBamSummary BED-file -h / multiBamSummary BED-file --help\n', add_help=False) return parser @@ -192,6 +197,10 @@ def bamcorrelate_args(case='bins'): def process_args(args=None): args = parse_arguments().parse_args(args) + if len(sys.argv) == 1: + parse_arguments().print_help() + sys.exit() + if args.labels and len(args.bamfiles) != len(args.labels): print("The number of labels does not match the number of bam files.") exit(0) diff --git a/deeptools/multiBigwigSummary.py b/deeptools/multiBigwigSummary.py index 8d89421f1..50f40beef 100644 --- a/deeptools/multiBigwigSummary.py +++ b/deeptools/multiBigwigSummary.py @@ -7,10 +7,13 @@ import numpy as np import multiprocessing from deeptools import parserCommon -from deeptools._version import __version__ from deeptools.utilities import smartLabels import deeptools.getScorePerBigWigBin as score_bw import deeptools.deepBlue as db +try: # keep python 3.7 support.
+ from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version old_settings = np.seterr(all='ignore') @@ -43,7 +46,7 @@ def parse_arguments(args=None): conflict_handler='resolve') parser.add_argument('--version', action='version', - version='multiBigwigSummary {}'.format(__version__)) + version='multiBigwigSummary {}'.format(version('deeptools'))) subparsers = parser.add_subparsers( title="commands", dest='command', @@ -69,7 +72,8 @@ def parse_arguments(args=None): add_help=False, usage='multiBigwigSummary bins ' '-b file1.bw file2.bw ' - '-o results.npz\n') + '-o results.npz\n' + 'help: multiBigwigSummary bins -h / multiBigwigSummary bins --help\n') # BED file arguments subparsers.add_parser( @@ -86,7 +90,8 @@ def parse_arguments(args=None): "different samples over a set of pre-defined peak regions.", usage='multiBigwigSummary BED-file ' '-b file1.bw file2.bw ' - '-o results.npz --BED selection.bed\n', + '-o results.npz --BED selection.bed\n' + 'help: multiBigwigSummary BED-file -h / multiBigwigSummary BED-file --help\n', add_help=False) return parser @@ -95,6 +100,10 @@ def parse_arguments(args=None): def process_args(args=None): args = parse_arguments().parse_args(args) + if len(sys.argv) == 1: + parse_arguments().print_help() + sys.exit() + if not args.labels and args.smartLabels: args.labels = smartLabels(args.bwfiles) elif not args.labels: diff --git a/deeptools/parserCommon.py b/deeptools/parserCommon.py index ef4f4d074..37e9f359a 100755 --- a/deeptools/parserCommon.py +++ b/deeptools/parserCommon.py @@ -1,6 +1,9 @@ import argparse import os -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version def check_float_0_1(value): @@ -297,7 +300,7 @@ def getParentArgParse(args=None, binSize=True, blackList=True): optional = parser.add_argument_group('Optional arguments') optional.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) if binSize: optional.add_argument('--binSize', '-bs', @@ -521,7 +524,7 @@ def heatmapperOptionalArgs(mode=['heatmap', 'profile'][0]): optional.add_argument("--help", "-h", action="help", help="show this help message and exit") optional.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) if mode == 'profile': optional.add_argument( '--averageType', diff --git a/deeptools/plotCorrelation.py b/deeptools/plotCorrelation.py index a03839baa..2b8d9f790 100644 --- a/deeptools/plotCorrelation.py +++ b/deeptools/plotCorrelation.py @@ -13,7 +13,10 @@ from deeptools.correlation import Correlation from deeptools.parserCommon import writableFile -from deeptools._version import __version__ +try: # keep python 3.7 support. 
+ from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version old_settings = np.seterr(all='ignore') @@ -41,7 +44,9 @@ def parse_arguments(args=None): epilog='example usages:\n' 'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\n\n' ' \n\n', - parents=[basic_args, heatmap_parser, scatter_parser]) + parents=[basic_args, heatmap_parser, scatter_parser], + usage='plotCorrelation -in matrix.gz -c spearman -p heatmap -o plot.png\n' + 'help: plotCorrelation -h / plotCorrelation --help\n') return parser @@ -117,7 +122,7 @@ def plot_correlation_args(): action='store_true') optional.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) group = parser.add_argument_group('Output optional options') diff --git a/deeptools/plotCoverage.py b/deeptools/plotCoverage.py index 02ce25dad..e233dcb71 100755 --- a/deeptools/plotCoverage.py +++ b/deeptools/plotCoverage.py @@ -18,7 +18,10 @@ import deeptools.countReadsPerBin as countR from deeptools import parserCommon from deeptools.utilities import smartLabels -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version old_settings = np.seterr(all='ignore') @@ -46,10 +49,12 @@ def parse_arguments(args=None): epilog='example usages:\nplotCoverage ' '--bamfiles file1.bam file2.bam -o results.png\n\n' ' \n\n', - conflict_handler='resolve') + conflict_handler='resolve', + usage='plotCoverage -b sample1.bam sample2.bam -o coverage.png \n' + 'help: plotCoverage -h / plotCoverage --help\n') parser.add_argument('--version', action='version', - version='plotCoverage {}'.format(__version__)) + version='plotCoverage {}'.format(version('deeptools'))) return parser diff --git a/deeptools/plotEnrichment.py b/deeptools/plotEnrichment.py index 7ef474eff..bbd53f90d 100755 --- a/deeptools/plotEnrichment.py +++ b/deeptools/plotEnrichment.py @@ -51,7 +51,9 @@ def parse_arguments(args=None): epilog='example usages:\n' 'plotEnrichment -b file1.bam file2.bam --BED peaks.bed -o enrichment.png\n\n' ' \n\n', - parents=[basic_args, parent_parser, read_options]) + parents=[basic_args, parent_parser, read_options], + usage='plotEnrichment -b sample1.bam sample2.bam --BED peaks.bed -o enrichment.png\n' + 'help: plotEnrichment -h / plotEnrichment --help\n') return parser diff --git a/deeptools/plotFingerprint.py b/deeptools/plotFingerprint.py index 4aee5b470..3adce87ac 100755 --- a/deeptools/plotFingerprint.py +++ b/deeptools/plotFingerprint.py @@ -42,8 +42,9 @@ def parse_arguments(args=None): 'these counts are sorted ' 'and the cumulative sum is finally plotted. 
', conflict_handler='resolve', - usage='An example usage is: plotFingerprint -b treatment.bam control.bam ' - '-plot fingerprint.png', + usage='plotFingerprint -b treatment.bam control.bam ' + '-plot fingerprint.png\n' + 'help: plotFingerprint -h / plotFingerprint --help', add_help=False) return parser @@ -240,7 +241,7 @@ def getSyntheticJSD(vec): lamb = np.mean(vec) # Average coverage coverage = np.sum(vec) - chip = np.zeros(MAXLEN, dtype=np.int) + chip = np.zeros(MAXLEN, dtype=int) for val in vec: # N.B., we need to clip past the end of the array if val >= MAXLEN: @@ -277,8 +278,8 @@ def getJSD(args, idx, mat): return np.NAN # These will hold the coverage histograms - chip = np.zeros(MAXLEN, dtype=np.int) - input = np.zeros(MAXLEN, dtype=np.int) + chip = np.zeros(MAXLEN, dtype=int) + input = np.zeros(MAXLEN, dtype=int) for row in mat: # ChIP val = row[idx] @@ -343,8 +344,8 @@ def signalAndBinDist(x): # Compute the JSD from the PMFs M = (PMFinput + PMFchip) / 2.0 JSD = 0.5 * (np.nansum(PMFinput * np.log2(PMFinput / M))) + 0.5 * (np.nansum(PMFchip * np.log2(PMFchip / M))) - - return np.sqrt(JSD) + # Round sqrt of JSD to 15 decimals, as planemo test has issue with rounding ? + return round(np.sqrt(JSD), 15) def getExpected(mu): @@ -393,7 +394,7 @@ def main(args=None): sys.stderr.write( "\nNo reads were found in {} regions sampled. Check that the\n" "min mapping quality is not overly high and that the \n" - "chromosome names between bam files are consistant.\n" + "chromosome names between bam files are consistent.\n" "For small genomes, decrease the --numberOfSamples.\n" "\n".format(num_reads_per_bin.shape[0])) exit(1) diff --git a/deeptools/plotHeatmap.py b/deeptools/plotHeatmap.py index bc4bbcc2a..ad666998e 100755 --- a/deeptools/plotHeatmap.py +++ b/deeptools/plotHeatmap.py @@ -41,7 +41,9 @@ def parse_arguments(args=None): 'scores associated with genomic regions. 
' 'The program requires a matrix file ' 'generated by the tool ``computeMatrix``.', - epilog='An example usage is: plotHeatmap -m ', + epilog='An example usage is: plotHeatmap -m matrix.gz', + usage='plotHeatmap -m matrix.gz\n' + 'help: plotHeatmap -h / plotHeatmap --help', add_help=False) return parser @@ -180,21 +182,21 @@ def addProfilePlot(hm, plt, fig, grids, iterNum, iterNum2, perGroup, averageType ticks[0].label1.set_horizontalalignment('left') ticks[-1].label1.set_horizontalalignment('right') - globalYmin = min(np.float64(globalYmin), ax_profile.get_ylim()[0]) + globalYmin = min(float(globalYmin), ax_profile.get_ylim()[0]) globalYmax = max(globalYmax, ax_profile.get_ylim()[1]) - # It turns out that set_ylim only takes np.float64s + # It turns out that set_ylim only takes float64s for sample_id, subplot in enumerate(ax_list): localYMin = yMin[sample_id % len(yMin)] localYMax = yMax[sample_id % len(yMax)] lims = [globalYmin, globalYmax] if localYMin: if localYMax: - lims = (np.float64(localYMin), np.float64(localYMax)) + lims = (float(localYMin), float(localYMax)) else: - lims = (np.float64(localYMin), lims[1]) + lims = (float(localYMin), lims[1]) elif localYMax: - lims = (lims[0], np.float64(localYMax)) + lims = (lims[0], float(localYMax)) if lims[0] >= lims[1]: lims = (lims[0], lims[0] + 1) ax_list[sample_id].set_ylim(lims) diff --git a/deeptools/plotPCA.py b/deeptools/plotPCA.py index d12eac8d4..c43942b85 100644 --- a/deeptools/plotPCA.py +++ b/deeptools/plotPCA.py @@ -11,7 +11,10 @@ from deeptools.correlation import Correlation from deeptools.parserCommon import writableFile -from deeptools._version import __version__ +try: # keep python 3.7 support. + from importlib.metadata import version +except ModuleNotFoundError: + from importlib_metadata import version def parse_arguments(args=None): @@ -30,7 +33,9 @@ def parse_arguments(args=None): epilog='example usages:\n' 'plotPCA -in coverages.npz -o pca.png\n\n' ' \n\n', - parents=[basic_args, ]) + parents=[basic_args, ], + usage='plotPCA -in coverage.npz -o pca.png\n' + 'help: plotPCA -h / plotPCA --help\n') return parser @@ -133,7 +138,7 @@ def plotCorrelationArgs(): help="A list of markers for the symbols. (e.g., '<','>','o') are accepted. The marker values should be space separated. For example, --markers 's' 'o' 's' 'o'. If not specified, the symbols will be given automatic shapes.") optional.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + version='%(prog)s {}'.format(version('deeptools'))) optionalEx = optional.add_mutually_exclusive_group() optionalEx.add_argument('--transpose', diff --git a/deeptools/plotProfile.py b/deeptools/plotProfile.py index 087b4f025..7497875f2 100755 --- a/deeptools/plotProfile.py +++ b/deeptools/plotProfile.py @@ -44,8 +44,10 @@ def parse_arguments(args=None): 'any other regions defined in BED ' ' will work. 
A matrix generated ' 'by computeMatrix is required.', - epilog='An example usage is: plotProfile -m ', - add_help=False) + epilog='An example usage is: plotProfile -m matrix.gz', + add_help=False, + usage='plotProfile -m matrix.gz\n' + 'help: plotProfile -h / plotProfile --help') return parser @@ -750,7 +752,7 @@ def plot_profile(self): self.color_list[coloridx], label, plot_type=self.plot_type) - globalYmin = min(np.float64(globalYmin), ax.get_ylim()[0]) + globalYmin = min(float(globalYmin), ax.get_ylim()[0]) globalYmax = max(globalYmax, ax.get_ylim()[1]) # Exclude ticks from all but one subplot by default @@ -783,18 +785,18 @@ def plot_profile(self): first = False ax_list.append(ax) - # It turns out that set_ylim only takes np.float64s + # It turns out that set_ylim only takes float64s for sample_id, subplot in enumerate(ax_list): localYMin = self.y_min[sample_id % len(self.y_min)] localYMax = self.y_max[sample_id % len(self.y_max)] lims = [globalYmin, globalYmax] if localYMin is not None: if localYMax is not None: - lims = (np.float64(localYMin), np.float64(localYMax)) + lims = (float(localYMin), float(localYMax)) else: - lims = (np.float64(localYMin), lims[1]) + lims = (float(localYMin), lims[1]) elif localYMax is not None: - lims = (lims[0], np.float64(localYMax)) + lims = (lims[0], float(localYMax)) if lims[0] >= lims[1]: lims = (lims[0], lims[0] + 1) ax_list[sample_id].set_ylim(lims) diff --git a/deeptools/sumCoveragePerBin.py b/deeptools/sumCoveragePerBin.py index b6a722a20..9cde45552 100644 --- a/deeptools/sumCoveragePerBin.py +++ b/deeptools/sumCoveragePerBin.py @@ -90,7 +90,7 @@ def get_coverage_of_region(self, bamHandle, chrom, regions, except: # bigWig input, as used by plotFingerprint if bamHandle.chroms(chrom): - _ = np.array(bamHandle.stats(chrom, regStart, regEnd, type="mean", nBins=nRegBins), dtype=np.float64) + _ = np.array(bamHandle.stats(chrom, regStart, regEnd, type="mean", nBins=nRegBins), dtype=float) _[np.isnan(_)] = 0.0 _ = _ * tileSize coverages += _ diff --git a/deeptools/test/testskip_heatmapper_images.py b/deeptools/test/skiptest_heatmapper_images.py similarity index 93% rename from deeptools/test/testskip_heatmapper_images.py rename to deeptools/test/skiptest_heatmapper_images.py index 8c8d56e95..1e1026504 100644 --- a/deeptools/test/testskip_heatmapper_images.py +++ b/deeptools/test/skiptest_heatmapper_images.py @@ -14,10 +14,6 @@ ROOT = os.path.dirname(os.path.abspath(__file__)) + "/test_heatmapper/" tolerance = 30 -skip = False -if matplotlib.__version__ != "3.1.1": - skip = True - def test_plotHeatmap_simple_plot(): """ @@ -28,8 +24,6 @@ def test_plotHeatmap_simple_plot(): -R {test_path}/test.bed -o /tmp/mat.gz -bs 25 """ - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master.mat.gz --outFileName {}".format(ROOT, outfile.name).split() deeptools.plotHeatmap.main(args) @@ -39,8 +33,6 @@ def test_plotHeatmap_simple_plot(): def test_plotHeatmap_rename_labels(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master.mat.gz --outFileName {} --regionsLabel uno dos".format(ROOT, outfile.name).split() @@ -51,8 +43,6 @@ def test_plotHeatmap_rename_labels(): def test_plotHeatmap_scale_regions(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master_scale_reg.mat.gz --outFileName {}".format(ROOT, outfile.name).split() deeptools.plotHeatmap.main(args) @@ 
-62,8 +52,6 @@ def test_plotHeatmap_scale_regions(): def test_plotHeatmap_multi_bigwig_pergroup(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master_multi.mat.gz --perGroup --samplesLabel file1 file2 file3 file4 " \ "--outFileName {}".format(ROOT, outfile.name).split() @@ -74,8 +62,6 @@ def test_plotHeatmap_multi_bigwig_pergroup(): def test_plotHeatmap_multiple_colors_muti_scales(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master_multi.mat.gz --colorList white,blue white,red --zMin 1 0 --zMax 4 5 " \ "--outFileName {}".format(ROOT, outfile.name).split() @@ -86,8 +72,6 @@ def test_plotHeatmap_multiple_colors_muti_scales(): def test_plotHeatmap_multiple_colormap_no_boxes(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master_multi.mat.gz --colorMap Reds binary terrain --boxAroundHeatmaps no " \ "--outFileName {}".format(ROOT, outfile.name).split() @@ -98,8 +82,6 @@ def test_plotHeatmap_multiple_colormap_no_boxes(): def test_plotHeatmap_interpolation(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/large_matrix.mat.gz --interpolation bilinear " \ "--outFileName {}".format(ROOT, outfile.name).split() @@ -110,8 +92,6 @@ def test_plotHeatmap_interpolation(): def test_plotProfiler(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master.mat.gz --outFileName {} --regionsLabel uno dos " \ "--plotType std".format(ROOT, outfile.name).split() @@ -122,8 +102,6 @@ def test_plotProfiler(): def test_plotProfiler_heatmap(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master.mat.gz --outFileName {} --plotType heatmap".format(ROOT, outfile.name).split() deeptools.plotProfile.main(args) @@ -133,8 +111,6 @@ def test_plotProfiler_heatmap(): def test_plotProfiler_overlapped_lines(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master.mat.gz --outFileName {} " \ "--plotType overlapped_lines --yMin -1".format(ROOT, outfile.name).split() @@ -145,8 +121,6 @@ def test_plotProfiler_overlapped_lines(): def test_plotProfiler_multibigwig(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master_multi.mat.gz --outFileName {} " \ "--numPlotsPerRow 2 --yMax 1.5".format(ROOT, outfile.name).split() @@ -157,8 +131,6 @@ def test_plotProfiler_multibigwig(): def test_plotProfiler_multibigwig_pergroup(): - if skip: - return outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False) args = "-m {}/master_multi.mat.gz --outFileName {} " \ "--perGroup --yMax 1.5".format(ROOT, outfile.name).split() diff --git a/deeptools/test/test_bamCoverage_and_bamCompare.py b/deeptools/test/test_bamCoverage_and_bamCompare.py index fbc8b9fd6..ac1f23ce2 100644 --- a/deeptools/test/test_bamCoverage_and_bamCompare.py +++ b/deeptools/test/test_bamCoverage_and_bamCompare.py @@ -1,4 +1,3 @@ -from nose.tools import assert_equal import deeptools.bamCoverage as bam_cov import deeptools.bamCompare as bam_comp import deeptools.getScaleFactor as gs @@ -46,7 +45,7 @@ def test_bam_coverage_arguments(): resp = _foo.readlines() _foo.close() expected 
= ['3R\t0\t50\t0\n', '3R\t50\t150\t1\n', '3R\t150\t200\t2\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -59,7 +58,7 @@ def test_bam_coverage_extend(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t150\t1\n', '3R\t150\t200\t3\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -76,7 +75,7 @@ def test_bam_coverage_extend_and_normalizeUsingRPGC(): # the scale factor should be 0.5, thus the result is similar to # that of the previous test divided by 0.5 expected = ['3R\t0\t150\t0.5\n', '3R\t150\t200\t1.5\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -90,7 +89,7 @@ def test_bam_coverage_skipnas(): resp = _foo.readlines() _foo.close() expected = ['3R\t50\t150\t1\n', '3R\t150\t200\t2\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -104,7 +103,7 @@ def test_bam_coverage_filtering(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t50\t0\n', '3R\t50\t200\t1\n'] - assert_equal(resp, expected) + assert resp == expected, "{} != {}".format(resp, expected) unlink(outfile) @@ -124,7 +123,7 @@ def test_bam_compare_arguments(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t200\t1\n'] - assert_equal(resp, expected) + assert resp == expected, "{} != {}".format(resp, expected) unlink(outfile) @@ -142,7 +141,7 @@ def test_bam_compare_diff_files(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t50\t0\n', '3R\t50\t100\t-1\n', '3R\t100\t150\t0\n', '3R\t150\t200\t-1\n'] - assert_equal(resp, expected) + assert resp == expected, "{} != {}".format(resp, expected) unlink(outfile) @@ -159,7 +158,7 @@ def test_bam_compare_pseudocounts(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t50\tinf\n', '3R\t50\t100\t0\n', '3R\t100\t150\t1\n', '3R\t150\t200\t0\n'] - assert_equal(resp, expected) + assert resp == expected, "{} != {}".format(resp, expected) unlink(outfile) @@ -176,7 +175,7 @@ def test_bam_compare_ZoverZ(): resp = _foo.readlines() _foo.close() expected = ['3R\t50\t100\t-1\n', '3R\t100\t150\t0\n', '3R\t150\t200\t-0.584963\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -228,7 +227,7 @@ def test_bam_compare_diff_files_skipnas(): resp = _foo.readlines() _foo.close() expected = ['3R\t100\t150\t0\n', '3R\t150\t200\t-1\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -246,7 +245,7 @@ def test_bam_compare_extend(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t100\t-1\n', '3R\t100\t150\t1\n', '3R\t150\t200\t-1\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -286,7 +285,7 @@ def test_bam_compare_scale_factors_ratio(): """ expected = ['3R\t0\t50\t1\n', '3R\t50\t100\t0.666667\n', '3R\t100\t150\t1.33333\n', '3R\t150\t200\t1\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -328,7 +327,7 @@ def test_bam_compare_scale_factors_subtract(): """ expected = ['3R\t0\t50\t0\n', '3R\t50\t100\t-250000\n', '3R\t100\t150\t250000\n', '3R\t150\t200\t0\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -358,7 +357,7 @@ def test_bam_coverage_filter_blacklist(): '3R\t950\t1000\t1.62672\n', 
'3R\t1000\t1050\t0.813362\n', '3R\t1050\t1500\t0\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -459,5 +458,5 @@ def test_bam_compare_filter_blacklist(): '3R\t750\t800\t-0.123451\n', '3R\t900\t950\t0.212545\n', '3R\t950\t1000\t0.199309\n', '3R\t1000\t1050\t0.167945\n', '3R\t1050\t1500\t0\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) diff --git a/deeptools/test/test_bigwigAverage.py b/deeptools/test/test_bigwigAverage.py index 22f5427eb..fed8f2162 100644 --- a/deeptools/test/test_bigwigAverage.py +++ b/deeptools/test/test_bigwigAverage.py @@ -42,7 +42,7 @@ def test_bigwigAverage(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t50\t0\n', '3R\t50\t100\t0.5\n', '3R\t100\t150\t1\n', '3R\t150\t200\t1.5\n'] - assert resp == expected, "{} != {}".format(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -55,7 +55,7 @@ def test_bigwigAverage_skipnas(): resp = _foo.readlines() _foo.close() expected = ['3R\t100\t150\t1\n', '3R\t150\t200\t1.5\n'] - assert resp == expected, "{} != {}".format(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -67,5 +67,5 @@ def test_bigwigAverageWithScale(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t50\t0\n', '3R\t50\t100\t0.25\n', '3R\t100\t150\t0.75\n', '3R\t150\t200\t1\n'] - assert resp == expected, "{} != {}".format(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) diff --git a/deeptools/test/test_bigwigCompare_and_multiBigwigSummary.py b/deeptools/test/test_bigwigCompare_and_multiBigwigSummary.py index 831924277..076baa219 100644 --- a/deeptools/test/test_bigwigCompare_and_multiBigwigSummary.py +++ b/deeptools/test/test_bigwigCompare_and_multiBigwigSummary.py @@ -45,7 +45,7 @@ def test_bigwigCompare(): resp = _foo.readlines() _foo.close() expected = ['3R\t0\t50\t0\n', '3R\t50\t100\t1\n', '3R\t100\t150\t2\n', '3R\t150\t200\t3\n'] - assert resp == expected, "{} != {}".format(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -58,7 +58,7 @@ def test_bigwigCompare_skipnas(): resp = _foo.readlines() _foo.close() expected = ['3R\t100\t150\t2\n', '3R\t150\t200\t3\n'] - assert resp == expected, "{} != {}".format(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -70,7 +70,7 @@ def test_bigwigCompare_skipZeroOverZero(): resp = _foo.readlines() _foo.close() expected = ['3R\t100\t200\t-1\n'] - assert resp == expected, "{} != {}".format(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -105,7 +105,7 @@ def test_multiBigwigSummary_outrawcounts(): 3R 100 150 1.0 1.0 3R 150 200 1.0 2.0 """ - assert resp == expected, "{} != {}".format(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) unlink("/tmp/null") diff --git a/deeptools/test/test_computeMatrixOperations.py b/deeptools/test/test_computeMatrixOperations.py index 27f2e633e..c253431d7 100644 --- a/deeptools/test/test_computeMatrixOperations.py +++ b/deeptools/test/test_computeMatrixOperations.py @@ -8,8 +8,6 @@ __author__ = 'Devon' -ROOT = os.path.dirname(os.path.abspath(__file__)) + "/test_data/" - def getHeader(fp): s = fp.readline() @@ -20,17 +18,17 @@ def getHeader(fp): class TestComputeMatrixOperations(object): - def setUp(self): - self.root = ROOT - self.matrix 
= self.root + "computeMatrixOperations.mat.gz" - self.bed = self.root + "computeMatrixOperations.bed" - self.rbindMatrix1 = self.root + "somegenes.txt.gz" - self.rbindMatrix2 = self.root + "othergenes.txt.gz" + root = os.path.dirname(os.path.abspath(__file__)) + "/test_data/" + matrix = root + "computeMatrixOperations.mat.gz" + bed = root + "computeMatrixOperations.bed" + rbindMatrix1 = root + "somegenes.txt.gz" + rbindMatrix2 = root + "othergenes.txt.gz" def testSubset(self): """ computeMatrixOperations subset """ + dCorrect = {"verbose": True, "scale": 1, "skip zeros": False, "nan after end": False, "sort using": "mean", "unscaled 5 prime": [0, 0, 0, 0], "body": [1000, 1000, 1000, 1000], "sample_labels": ["SRR648667.forward", "SRR648668.forward", "SRR648669.forward", "SRR648670.forward"], "downstream": [0, 0, 0, 0], "unscaled 3 prime": [0, 0, 0, 0], "group_labels": ["genes"], "bin size": [10, 10, 10, 10], "upstream": [0, 0, 0, 0], "group_boundaries": [0, 196], "sample_boundaries": [0, 100, 200, 300, 400], "max threshold": None, "ref point": [None, None, None, None], "min threshold": None, "sort regions": "no", "proc number": 20, "bin avg type": "mean", "missing data as zero": False} oname = "/tmp/subset.mat.gz" args = "subset -m {} --sample SRR648667.forward SRR648668.forward SRR648669.forward SRR648670.forward -o {}".format(self.matrix, oname) @@ -41,7 +39,8 @@ def testSubset(self): h = hashlib.md5(f.read()).hexdigest() f.close() assert d == dCorrect - assert h == "edb3c8506c3f27ebb8c7ddf94d5ba594" + expectedh = 'edb3c8506c3f27ebb8c7ddf94d5ba594' + assert f'{h}' == f'{expectedh}' os.remove(oname) def testRelabel(self): @@ -68,14 +67,15 @@ def testfilterStrand(self): dCorrect = {"verbose": True, "scale": 1, "skip zeros": False, "nan after end": False, "sort using": "mean", "unscaled 5 prime": [0, 0, 0, 0, 0, 0, 0, 0], "body": [1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000], "sample_labels": ["SRR648667.forward", "SRR648668.forward", "SRR648669.forward", "SRR648670.forward", "SRR648667.reverse", "SRR648668.reverse", "SRR648669.reverse", "SRR648670.reverse"], "downstream": [0, 0, 0, 0, 0, 0, 0, 0], "unscaled 3 prime": [0, 0, 0, 0, 0, 0, 0, 0], "group_labels": ["genes"], "bin size": [10, 10, 10, 10, 10, 10, 10, 10], "upstream": [0, 0, 0, 0, 0, 0, 0, 0], "group_boundaries": [0, 107], "sample_boundaries": [0, 100, 200, 300, 400, 500, 600, 700, 800], "max threshold": None, "ref point": [None, None, None, None, None, None, None, None], "min threshold": None, "sort regions": "no", "proc number": 20, "bin avg type": "mean", "missing data as zero": False} oname = "/tmp/filterStrand1.mat.gz" args = "filterStrand -m {} -o {} --strand +".format(self.matrix, oname) - args = args.split() + args = args.split(' ') cmo.main(args) f = gzip.GzipFile(oname) d = getHeader(f) # Skip the header, which can be in a different order h = hashlib.md5(f.read()).hexdigest() f.close() assert d == dCorrect - assert h == "300f8000be5b5f51e803b57ef08f1c9e" + expectedh = '300f8000be5b5f51e803b57ef08f1c9e' + assert f'{h}' == f'{expectedh}' os.remove(oname) dCorrect = {u'verbose': True, u'scale': 1, u'skip zeros': False, u'nan after end': False, u'sort using': u'mean', u'unscaled 5 prime': [0, 0, 0, 0, 0, 0, 0, 0], u'body': [1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000], u'sample_labels': [u'SRR648667.forward', u'SRR648668.forward', u'SRR648669.forward', u'SRR648670.forward', u'SRR648667.reverse', u'SRR648668.reverse', u'SRR648669.reverse', u'SRR648670.reverse'], u'downstream': [0, 0, 0, 0, 0, 0, 0, 0], u'unscaled 3 
prime': [0, 0, 0, 0, 0, 0, 0, 0], u'group_labels': [u'genes'], u'bin size': [10, 10, 10, 10, 10, 10, 10, 10], u'upstream': [0, 0, 0, 0, 0, 0, 0, 0], u'group_boundaries': [0, 89], u'sample_boundaries': [0, 100, 200, 300, 400, 500, 600, 700, 800], u'missing data as zero': False, u'ref point': [None, None, None, None, None, None, None, None], u'min threshold': None, u'sort regions': u'no', u'proc number': 20, u'bin avg type': u'mean', u'max threshold': None} @@ -88,7 +88,8 @@ def testfilterStrand(self): h = hashlib.md5(f.read()).hexdigest() f.close() assert d == dCorrect - assert h == "0a6ca070a5ba4564f1ab950ac3b7c8f1" + expectedh = '0a6ca070a5ba4564f1ab950ac3b7c8f1' + assert f'{h}' == f'{expectedh}' os.remove(oname) def testrbind(self): @@ -105,7 +106,8 @@ def testrbind(self): h = hashlib.md5(f.read()).hexdigest() f.close() assert d == dCorrect - assert h == "3dd96c7b05e0ca5ada21212defe57fba" + expectedh = '3dd96c7b05e0ca5ada21212defe57fba' + assert f'{h}' == f'{expectedh}' os.remove(oname) def testrbind2(self): @@ -122,7 +124,8 @@ def testrbind2(self): h = hashlib.md5(f.read()).hexdigest() f.close() assert d == dCorrect - assert h == "5d8b1517fc4c63d000b6b37f70ee163b" + expectedh = '5d8b1517fc4c63d000b6b37f70ee163b' + assert f'{h}' == f'{expectedh}' os.remove(oname) def testcbind(self): @@ -139,7 +142,8 @@ def testcbind(self): h = hashlib.md5(f.read()).hexdigest() f.close() assert d == dCorrect - assert h == "e55d89704bb16a11f366663a8fd90a47" + expectedh = 'e55d89704bb16a11f366663a8fd90a47' + assert f'{h}' == f'{expectedh}' os.remove(oname) def testsort(self): @@ -156,5 +160,6 @@ def testsort(self): h = hashlib.md5(f.read()).hexdigest() f.close() assert d == dCorrect - assert h == "10ea07d1aa58f44625abe2142ef76094" + expectedh = '10ea07d1aa58f44625abe2142ef76094' + assert f'{h}' == f'{expectedh}' os.remove(oname) diff --git a/deeptools/test/test_countReadsPerBin.py b/deeptools/test/test_countReadsPerBin.py index 52941071d..eb09a9684 100644 --- a/deeptools/test/test_countReadsPerBin.py +++ b/deeptools/test/test_countReadsPerBin.py @@ -4,157 +4,168 @@ import numpy as np import numpy.testing as nt import os.path +import pytest __author__ = 'Fidel' -ROOT = os.path.dirname(os.path.abspath(__file__)) + "/test_data/" +@pytest.mark.parametrize("bc", ["bam", 'cram']) +class TestCountReadsPerBin(): -class TestCountReadsPerBin(object): - - def setUp(self): - """ - The distribution of reads between the two bam files is as follows. - - They cover 200 bp:: + def ifiles(self, ext='bam'): + root = os.path.dirname(os.path.abspath(__file__)) + "/test_data/" + bamFile1 = root + "testA." + ext + bamFile2 = root + "testB." + ext + bamFile_PE = root + "test_paired2." + ext + chrom = '3R' + step_size = 50 + bin_length = 25 + c = cr.CountReadsPerBin( + [bamFile1, bamFile2], + binLength=bin_length, + stepSize=step_size + ) + return c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length + """ + The distribution of reads between the two bam files is as follows. 
- 0 100 200 - |------------------------------------------------------------| - A ==============> - <============== + They cover 200 bp:: + 0 100 200 + |------------------------------------------------------------| + A ==============> + <============== - B <============== ==============> - ==============> - ==============> - """ - self.root = ROOT - self.bamFile1 = self.root + "testA.bam" - self.bamFile2 = self.root + "testB.bam" - self.bamFile_PE = self.root + "test_paired2.bam" - self.chrom = '3R' - step_size = 50 - bin_length = 25 - self.c = cr.CountReadsPerBin([self.bamFile1, self.bamFile2], - binLength=bin_length, - stepSize=step_size) + B <============== ==============> + ==============> + ==============> + """ - def test_count_reads_in_region(self): - self.c.skipZeros = False - resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200) + def test_count_reads_in_region(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c.skipZeros = False + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_equal(resp, np.array([[0, 0.], [0, 1.], [1, 1.], [1, 2.]])) - def test_count_reads_in_region_extension_1(self): + def test_count_reads_in_region_extension_1(self, bc): """ In this case when read extension is smaller than read length extension is turned off and a warning is printed. """ - self.c = cr.CountReadsPerBin([self.bamFile1, self.bamFile2], - binLength=1, - stepSize=50, - extendReads=25) + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c = cr.CountReadsPerBin( + [bamFile1, bamFile2], + binLength=1, + stepSize=50, + extendReads=25 + ) - resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200) + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_equal(resp, np.array([[0, 0.], [0, 1.], [1, 1.], [1, 2.]])) - def test_count_reads_in_region_total(self): + def test_count_reads_in_region_total(self, bc): """ count the reads over the whole region 2 for the first case, and 4 for the second """ - self.c.skipZeros = False - self.c.stepSize = 200 - self.c.binLength = 200 - resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200) + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c.skipZeros = False + c.stepSize = 200 + c.binLength = 200 + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_equal(resp, np.array([[2, 4.]])) - def test_countReadsInRegions_min_mapping_quality(self): + def test_countReadsInRegions_min_mapping_quality(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) # Test min mapping quality. - self.c.minMappingQuality = 40 - self.c.skipZeros = False + c.minMappingQuality = 40 + c.skipZeros = False - resp, _ = self.c.count_reads_in_region(self. 
chrom, 0, 200) + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_equal(resp, np.array([[0, 0, 0, 1.], [0, 0, 0, 1.]]).T) - def test_count_reads_in_region_ignore_duplicates(self): - + def test_count_reads_in_region_ignore_duplicates(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) # Test ignore duplicates - self.c.skipZeros = False - self.c.ignoreDuplicates = True - resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200) + c.skipZeros = False + c.ignoreDuplicates = True + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_equal(resp, np.array([[0, 0, 1, 1.], [0, 1, 1, 1.]]).T) - def test_count_reads_in_region_ignore_bed_regions(self): + def test_count_reads_in_region_ignore_bed_regions(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) # Test bed regions: - bed_regions = [[self.chrom, [(10, 20)], "."], [self.chrom, [(150, 160)], "."]] - self.c.skipZeros = False - self.c.binLength = 10 - resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200, bed_regions_list=bed_regions) + bed_regions = [[chrom, [(10, 20)], "."], [chrom, [(150, 160)], "."]] + c.skipZeros = False + c.binLength = 10 + resp, _ = c.count_reads_in_region(chrom, 0, 200, bed_regions_list=bed_regions) nt.assert_equal(resp, np.array([[0, 1.], [0, 2.]]).T) - def test_get_coverage_of_region_sam_flag_include(self): - - self.c.samFlag_include = 16 # include reverse reads only - self.c.bamFilesList = [self.bamFile1] - resp, _ = self.c.count_reads_in_region('3R', 0, 200) + def test_get_coverage_of_region_sam_flag_include(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c.samFlag_include = 16 # include reverse reads only + c.bamFilesList = [bamFile1] + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_array_equal(resp, np.array([[0], [0], [0], [1]])) - def test_get_coverage_of_region_sam_flag_exclude(self): - - self.c.samFlag_exclude = 16 # exclude reverse reads - self.c.bamFilesList = [self.bamFile1] - resp, _ = self.c.count_reads_in_region('3R', 0, 200) + def test_get_coverage_of_region_sam_flag_exclude(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c.samFlag_exclude = 16 # exclude reverse reads + c.bamFilesList = [bamFile1] + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_array_equal(resp, np.array([[0], [0], [1], [0]])) - def test_get_coverage_of_region_large_bin(self): - self.c.bamFilesList = [self.bamFile2] - self.c.binLength = 200 - self.c.stepSize = 200 - resp, _ = self.c.count_reads_in_region('3R', 0, 200) + def test_get_coverage_of_region_large_bin(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c.bamFilesList = [bamFile2] + c.binLength = 200 + c.stepSize = 200 + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_array_equal(resp, np.array([[4]])) - def test_get_coverage_of_region_ignore_duplicates(self): - self.c.ignoreDuplicates = True - self.c.bamFilesList = [self.bamFile2] - resp, _ = self.c.count_reads_in_region('3R', 0, 200) + def test_get_coverage_of_region_ignore_duplicates(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c.ignoreDuplicates = True + c.bamFilesList = [bamFile2] + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_array_equal(resp, np.array([[0.], [1.], [1.], [1.]])) # check zero to nans - self.c.zerosToNans = True - resp, _ = self.c.count_reads_in_region('3R', 0, 
200) + c.zerosToNans = True + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_array_equal(resp, np.array([[np.nan], [1.], [1.], [1.]])) - def test_get_coverage_of_region_split_read(self): + def test_get_coverage_of_region_split_read(self, bc): """ The bamFile1 contains a read at position 10 with the following CIGAR: 10S20M10N10M10S that maps to a chromosome named chr_cigar. """ - + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) # turn of read extension - self.c.extendPairedEnds = False - self.c.bamFilesList = [self.bamFile1] - self.c.binLength = 10 - self.c.stepSize = 10 - resp, _ = self.c.count_reads_in_region('chr_cigar', 0, 100) + c.extendPairedEnds = False + c.bamFilesList = [bamFile1] + c.binLength = 10 + c.stepSize = 10 + resp, _ = c.count_reads_in_region('chr_cigar', 0, 100) nt.assert_array_equal(resp, np.array([[0.], [1.], [1.], @@ -166,60 +177,33 @@ def test_get_coverage_of_region_split_read(self): [0.], [0.]])) - def test_get_coverage_of_region_zeros_to_nan(self): - self.c.zerosToNans = True - resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200) + def test_get_coverage_of_region_zeros_to_nan(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) + c.zerosToNans = True + resp, _ = c.count_reads_in_region(chrom, 0, 200) nt.assert_equal(resp, np.array([[np.nan, np.nan], [np.nan, 1], [1, 1], [1, 2]])) - def test_bed_file(self): + def test_bed_file(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length = self.ifiles(bc) bed = "chr3R\t0\t10\nchr3R\t110\t120\nchr3R\t160\t180" import tempfile bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False, mode="w") bed_file.write(bed) bed_file.close() - self.c = cr.CountReadsPerBin([self.bamFile2], - bedFile=[bed_file.name]) + c = cr.CountReadsPerBin( + [bamFile2], + bedFile=[bed_file.name] + ) - resp = self.c.run() + resp = c.run() nt.assert_equal(resp, np.array([[0.], [1.], [2.]])) import os os.unlink(bed_file.name) - - -class TestCountReadsPerBinCRAM(TestCountReadsPerBin): - def setUp(self): - """ - As above, but using CRAM rather than BAM - The distribution of reads between the two bam files is as follows. 
- - They cover 200 bp:: - - 0 100 200 - |------------------------------------------------------------| - A ==============> - <============== - - - B <============== ==============> - ==============> - ==============> - """ - self.root = ROOT - self.bamFile1 = self.root + "testA.cram" - self.bamFile2 = self.root + "testB.cram" - self.bamFile_PE = self.root + "test_paired2.cram" - self.chrom = '3R' - step_size = 50 - bin_length = 25 - - self.c = cr.CountReadsPerBin([self.bamFile1, self.bamFile2], - binLength=bin_length, - stepSize=step_size) diff --git a/deeptools/test/test_heatmapper.py b/deeptools/test/test_heatmapper.py index 800d4d7be..7eb9d6d78 100644 --- a/deeptools/test/test_heatmapper.py +++ b/deeptools/test/test_heatmapper.py @@ -142,107 +142,168 @@ def test_computeMatrix_metagene(): def test_chopRegions_body(): region = [(0, 200), (300, 400), (800, 900)] lbins, bodybins, rbins, padLeft, padRight = deeptools.heatmapper.chopRegions(region, left=0, right=0) - assert lbins == [] - assert rbins == [] - assert bodybins == region - assert padLeft == 0 - assert padRight == 0 + e_lbins = [] + e_rbins = [] + e_padLeft = 0 + e_padRight = 0 + assert f"{lbins}" == f"{e_lbins}" + assert f"{rbins}" == f"{e_rbins}" + assert f"{bodybins}" == f"{region}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{padRight}" == f"{e_padRight}" # Unscaled 5', 3' lbins, bodybins, rbins, padLeft, padRight = deeptools.heatmapper.chopRegions(region, left=150, right=150) - assert lbins == [(0, 150)] - assert rbins == [(350, 400), (800, 900)] - assert bodybins == [(150, 200), (300, 350)] - assert padLeft == 0 - assert padRight == 0 + e_lbins = [(0, 150)] + e_rbins = [(350, 400), (800, 900)] + e_bodybins = [(150, 200), (300, 350)] + e_padLeft = 0 + e_padRight = 0 + assert f"{lbins}" == f"{e_lbins}" + assert f"{rbins}" == f"{e_rbins}" + assert f"{bodybins}" == f"{e_bodybins}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{padRight}" == f"{e_padRight}" def test_chopRegions_TSS(): region = [(0, 200), (300, 400), (800, 900)] # + strand, 250 downstream downstream, body, unscaled3prime, padRight, _ = deeptools.heatmapper.chopRegions(region, left=250) - assert downstream == [(0, 200), (300, 350)] - assert body == [(350, 400), (800, 900)] - assert unscaled3prime == [] - assert padRight == 0 - assert _ == 0 + e_downstream = [(0, 200), (300, 350)] + e_body = [(350, 400), (800, 900)] + e_unscaled3prime = [] + e_padRight = 0 + e_ = 0 + assert f"{downstream}" == f"{e_downstream}" + assert f"{body}" == f"{e_body}" + assert f"{unscaled3prime}" == f"{e_unscaled3prime}" + assert f"{padRight}" == f"{e_padRight}" + assert f"{_}" == f"{e_}" # + strand, 500 downstream downstream, body, unscaled3prime, padRight, _ = deeptools.heatmapper.chopRegions(region, left=500) - assert downstream == region - assert body == [] - assert unscaled3prime == [] - assert padRight == 100 - assert _ == 0 + e_body = [] + e_unscaled3prime = [] + e_padRight = 100 + e_ = 0 + assert f"{downstream}" == f"{region}" + assert f"{body}" == f"{e_body}" + assert f"{unscaled3prime}" == f"{e_unscaled3prime}" + assert f"{padRight}" == f"{e_padRight}" + assert f"{_}" == f"{e_}" # - strand, 250 downstream (labeled "upstream" due to being on the - strand) unscaled5prime, body, upstream, _, padLeft = deeptools.heatmapper.chopRegions(region, right=250) - assert upstream == [(150, 200), (300, 400), (800, 900)] - assert body == [(0, 150)] - assert unscaled5prime == [] - assert padLeft == 0 - assert _ == 0 + e_upstream = [(150, 200), (300, 400), (800, 900)] + e_body = [(0, 
150)] + e_unscaled5prime = [] + e_padLeft = 0 + e_ = 0 + assert f"{upstream}" == f"{e_upstream}" + assert f"{body}" == f"{e_body}" + assert f"{unscaled5prime}" == f"{e_unscaled5prime}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{_}" == f"{e_}" # - strand, 500 downstream (labeled "upstream" due to being on the - strand) unscaled5prime, body, upstream, _, padLeft = deeptools.heatmapper.chopRegions(region, right=500) - assert upstream == region - assert body == [] - assert unscaled5prime == [] - assert padLeft == 100 - assert _ == 0 + e_body = [] + e_unscaled5prime = [] + e_padLeft = 100 + e_ = 0 + assert f"{upstream}" == f"{region}" + assert f"{body}" == f"{e_body}" + assert f"{unscaled5prime}" == f"{e_unscaled5prime}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{_}" == f"{e_}" def test_chopRegions_TES(): region = [(0, 200), (300, 400), (800, 900)] # + strand, 250 upstream unscaled5prime, body, upstream, _, padLeft = deeptools.heatmapper.chopRegions(region, right=250) - assert unscaled5prime == [] - assert body == [(0, 150)] - assert upstream == [(150, 200), (300, 400), (800, 900)] - assert _ == 0 - assert padLeft == 0 + e_unscaled5prime = [] + e_body = [(0, 150)] + e_upstream = [(150, 200), (300, 400), (800, 900)] + e_ = 0 + e_padLeft = 0 + assert f"{unscaled5prime}" == f"{e_unscaled5prime}" + assert f"{body}" == f"{e_body}" + assert f"{upstream}" == f"{e_upstream}" + assert f"{_}" == f"{e_}" + assert f"{padLeft}" == f"{e_padLeft}" # + strand, 500 upstream unscaled5prime, body, upstream, _, padLeft = deeptools.heatmapper.chopRegions(region, right=500) - assert unscaled5prime == [] - assert body == [] - assert upstream == region - assert _ == 0 - assert padLeft == 100 + e_unscaled5prime = [] + e_body = [] + e_ = 0 + e_padLeft = 100 + assert f"{unscaled5prime}" == f"{e_unscaled5prime}" + assert f"{body}" == f"{e_body}" + assert f"{upstream}" == f"{region}" + assert f"{_}" == f"{e_}" + assert f"{padLeft}" == f"{e_padLeft}" # + strand, 250 downstream (labeled "upstream" due to being on the - strand) downstream, body, unscaled3prime, padRight, _ = deeptools.heatmapper.chopRegions(region, left=250) - assert downstream == [(0, 200), (300, 350)] - assert body == [(350, 400), (800, 900)] - assert unscaled3prime == [] - assert padRight == 0 - assert _ == 0 + e_downstream = [(0, 200), (300, 350)] + e_body = [(350, 400), (800, 900)] + e_unscaled3prime = [] + e_padRight = 0 + e_ = 0 + assert f"{downstream}" == f"{e_downstream}" + assert f"{body}" == f"{e_body}" + assert f"{unscaled3prime}" == f"{e_unscaled3prime}" + assert f"{padRight}" == f"{e_padRight}" + assert f"{_}" == f"{e_}" # + strand, 500 downstream (labeled "upstream" due to being on the - strand) downstream, body, unscaled3prime, padRight, _ = deeptools.heatmapper.chopRegions(region, left=500) - assert downstream == region - assert body == [] - assert unscaled3prime == [] - assert padRight == 100 - assert _ == 0 + e_body = [] + e_unscaled3prime = [] + e_padRight = 100 + e_ = 0 + assert f"{downstream}" == f"{region}" + assert f"{body}" == f"{e_body}" + assert f"{unscaled3prime}" == f"{e_unscaled3prime}" + assert f"{padRight}" == f"{e_padRight}" + assert f"{_}" == f"{e_}" def test_chopRegionsFromMiddle(): region = [(0, 200), (300, 400), (800, 900)] # + strand, 100 upstream/200 downstream upstream, downstream, padLeft, padRight = deeptools.heatmapper.chopRegionsFromMiddle(region, left=100, right=200) - assert upstream == [(100, 200)] - assert downstream == [(300, 400), (800, 900)] - assert padLeft == 0 - assert padRight == 0 + 
e_upstream = [(100, 200)] + e_downstream = [(300, 400), (800, 900)] + e_padLeft = 0 + e_padRight = 0 + assert f"{upstream}" == f"{e_upstream}" + assert f"{downstream}" == f"{e_downstream}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{padRight}" == f"{e_padRight}" # + strand, 250 upstream/300 downstream upstream, downstream, padLeft, padRight = deeptools.heatmapper.chopRegionsFromMiddle(region, left=250, right=300) - assert upstream == [(0, 200)] - assert downstream == [(300, 400), (800, 900)] - assert padLeft == 50 - assert padRight == 100 + e_upstream = [(0, 200)] + e_downstream = [(300, 400), (800, 900)] + e_padLeft = 50 + e_padRight = 100 + assert f"{upstream}" == f"{e_upstream}" + assert f"{downstream}" == f"{e_downstream}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{padRight}" == f"{e_padRight}" # - strand, 100 upstream/200 downstream upstream, downstream, padLeft, padRight = deeptools.heatmapper.chopRegionsFromMiddle(region, left=200, right=100) - assert upstream == [(0, 200)] - assert downstream == [(300, 400)] - assert padLeft == 0 - assert padRight == 0 + e_upstream = [(0, 200)] + e_downstream = [(300, 400)] + e_padLeft = 0 + e_padRight = 0 + assert f"{upstream}" == f"{e_upstream}" + assert f"{downstream}" == f"{e_downstream}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{padRight}" == f"{e_padRight}" # - strand, 250 upstream/300 downstream upstream, downstream, padLeft, padRight = deeptools.heatmapper.chopRegionsFromMiddle(region, left=300, right=250) - assert upstream == [(0, 200)] - assert downstream == [(300, 400), (800, 900)] - assert padLeft == 100 - assert padRight == 50 + e_upstream = [(0, 200)] + e_downstream = [(300, 400), (800, 900)] + e_padLeft = 100 + e_padRight = 50 + assert f"{upstream}" == f"{e_upstream}" + assert f"{downstream}" == f"{e_downstream}" + assert f"{padLeft}" == f"{e_padLeft}" + assert f"{padRight}" == f"{e_padRight}" diff --git a/deeptools/test/test_readFiltering.py b/deeptools/test/test_readFiltering.py index 6e0b0e81a..8227530fb 100644 --- a/deeptools/test/test_readFiltering.py +++ b/deeptools/test/test_readFiltering.py @@ -1,4 +1,3 @@ -from nose.tools import assert_equal import deeptools.estimateReadFiltering as est import deeptools.alignmentSieve as sieve import os.path @@ -30,7 +29,7 @@ def test_estimate_read_filtering_minimal(): _ = resp[1].split("\t") _[0] = os.path.basename(_[0]) resp[1] = "\t".join(_) - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -51,7 +50,7 @@ def test_estimate_read_filtering_params(): resp[1] = "\t".join(_) expected = ['Sample\tTotal Reads\tMapped Reads\tAlignments in blacklisted regions\tEstimated mapped reads filtered\tBelow MAPQ\tMissing Flags\tExcluded Flags\tInternally-determined Duplicates\tMarked Duplicates\tSingletons\tWrong strand\n', 'test_filtering.bam\t193\t193\t7\t193\t41.4\t0.0\t186.5\t31.6\t0.0\t0.0\t0.0\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -72,14 +71,16 @@ def test_sieve(): expected = ['#bamFilterReads --filterMetrics\n', '#File\tReads Remaining\tTotal Initial Reads\n', 'test_filtering\t5\t193\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outlog) h = hashlib.md5(pysam.view(outfile).encode('utf-8')).hexdigest() - assert h == "acbc4443fb0387bfd6c412af9d4fc414" + expectedh = 'acbc4443fb0387bfd6c412af9d4fc414' + assert f'{h}' == f'{expectedh}' unlink(outfile) h1 = 
hashlib.md5(pysam.view(outfiltered).encode('utf-8')).hexdigest() - assert h1 == "b90befdd5f073f14acb9a38661f301ad" + expectedh = 'b90befdd5f073f14acb9a38661f301ad' + assert f"{h1}" == f"{expectedh}" unlink(outfiltered) @@ -121,7 +122,7 @@ def test_sieve_BED(): 'chr2\t5001491\t5001527\n', 'chr2\t5001700\t5001736\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) @@ -161,5 +162,5 @@ def test_sieve_BED_shift(): 'chr2\t5001119\t5001266\n', 'chr2\t5001230\t5001600\n'] - assert_equal(resp, expected) + assert f"{resp}" == f"{expected}", f"{resp} != {expected}" unlink(outfile) diff --git a/deeptools/test/test_tools.py b/deeptools/test/test_tools.py index 6d4a5a07c..c9b7a8061 100644 --- a/deeptools/test/test_tools.py +++ b/deeptools/test/test_tools.py @@ -1,17 +1,29 @@ -import subprocess +from subprocess import PIPE, run import os +try: + import tomllib +except ModuleNotFoundError: + import tomli as tomllib -ROOT = os.path.dirname(os.path.abspath(__file__)) + "/../../bin" +TOMLFILE = os.path.dirname(os.path.abspath(__file__)) + "/../../pyproject.toml" def test_tools(): """ - Checks everything that is in /bin/ - and tries to run it - + Check every script listed in 'pyproject.toml': + make sure the version reported by each tool matches the version set in the toml file + and that all exit codes are 0 """ - if os.path.isdir(ROOT): - for _file in os.listdir(ROOT): - print(_file) - if os.path.isfile(os.path.join(ROOT, _file)): - subprocess.check_call("{}/{} --version".format(ROOT, _file).split()) + with open(TOMLFILE, 'rb') as f: + _toml = tomllib.load(f) + for _p in _toml['project']['scripts'].keys(): + _res = run( + [_p, "--version"], + stdout=PIPE, + stderr=PIPE + ) + _version = _res.stdout.decode().splitlines()[0] + e_ver = _p + " " + _toml['project']['version'] + assert f"{_version}" == f"{e_ver}" + e_retc = 0 + assert f"{_res.returncode}" == f"{e_retc}" diff --git a/deeptools/test/test_writeBedGraph.py b/deeptools/test/test_writeBedGraph.py index c419684f3..1ca05e7c8 100644 --- a/deeptools/test/test_writeBedGraph.py +++ b/deeptools/test/test_writeBedGraph.py @@ -1,143 +1,117 @@ -from unittest import TestCase -from nose.tools import * import os - +import pytest import deeptools.writeBedGraph as wr from deeptools.writeBedGraph import scaleCoverage -ROOT = os.path.dirname(os.path.abspath(__file__)) + "/test_data/" - -__author__ = 'fidel' - - -class TestWriteBedGraph(TestCase): - - def setUp(self): - """ - The distribution of reads between the two bam files is as follows. - - They cover 200 bp:: - - 0 100 200 - |------------------------------------------------------------| - A ==============> - <============== - - - B <============== ==============> - ==============> - ==============> - """ - - self.root = ROOT - self.bamFile1 = self.root + "testA.bam" - self.bamFile2 = self.root + "testB.bam" - self.bamFile_PE = self.root + "test_paired2.bam" - self.chrom = '3R' - self.step_size = 50 - self.bin_length = 50 - self.func_args = {'scaleFactor': 1.0} - - self.c = wr.WriteBedGraph([self.bamFile1], - binLength=self.bin_length, - stepSize=self.step_size) - - def test_writeBedGraph_worker(self): - self.c.zerosToNans = False - self.c.skipZeros = False - - tempFile = self.c.writeBedGraph_worker('3R', 0, 200, scaleCoverage, self.func_args) +@pytest.mark.parametrize("bc", ["bam", 'cram']) +class TestWriteBedGraph(): + def ifiles(self, ext='bam'): + root = os.path.dirname(os.path.abspath(__file__)) + "/test_data/" + bamFile1 = root + "testA." + ext + bamFile2 = root + "testB."
+ ext + bamFile_PE = root + "test_paired2." + ext + chrom = '3R' + step_size = 50 + bin_length = 50 + func_args = {'scaleFactor': 1.0} + c = wr.WriteBedGraph( + [bamFile1], + binLength=bin_length, + stepSize=step_size + ) + return c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args + + def test_writeBedGraph_worker(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc) + c.zerosToNans = False + c.skipZeros = False + + tempFile = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args) _foo = open(tempFile[3], 'r') res = _foo.readlines() _foo.close() - assert_equal(res, ['3R\t0\t100\t0\n', '3R\t100\t200\t1\n']) + expected = ['3R\t0\t100\t0\n', '3R\t100\t200\t1\n'] + assert f"{res}" == f"{expected}" os.remove(tempFile[3]) - def test_writeBedGraph_worker_zerotonan(self): + def test_writeBedGraph_worker_zerotonan(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc) # turn on zeroToNan - self.c.zerosToNans = True - tempFile2 = self.c.writeBedGraph_worker('3R', 0, 200, scaleCoverage, self.func_args) + c.zerosToNans = True + tempFile2 = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args) _foo = open(tempFile2[3], 'r') res = _foo.readlines() _foo.close() - assert_equal(res, ['3R\t100\t200\t1\n']) + expected = ['3R\t100\t200\t1\n'] + assert f"{res}" == f"{expected}" os.remove(tempFile2[3]) - def test_writeBedGraph_worker_scaling(self): + def test_writeBedGraph_worker_scaling(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc) func_args = {'scaleFactor': 3.0} - tempFile = self.c.writeBedGraph_worker('3R', 0, 200, scaleCoverage, func_args) + tempFile = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args) _foo = open(tempFile[3], 'r') res = _foo.readlines() _foo.close() - assert_equal(res, ['3R\t0\t100\t0\n', '3R\t100\t200\t3\n']) + expected = ['3R\t0\t100\t0\n', '3R\t100\t200\t3\n'] + assert f"{res}" == f"{expected}" os.remove(tempFile[3]) - def test_writeBedGraph_worker_ignore_duplicates(self): - self.c = wr.WriteBedGraph([self.bamFile2], - binLength=self.bin_length, - stepSize=self.step_size, ignoreDuplicates=True) - self.c.zerosToNans = True - - tempFile = self.c.writeBedGraph_worker('3R', 0, 200, scaleCoverage, self.func_args) + def test_writeBedGraph_worker_ignore_duplicates(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc) + c = wr.WriteBedGraph( + [bamFile2], + binLength=bin_length, + stepSize=step_size, + ignoreDuplicates=True + ) + c.zerosToNans = True + + tempFile = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args) _foo = open(tempFile[3], 'r') res = _foo.readlines() _foo.close() - assert_equal(res, ['3R\t50\t200\t1\n']) + expected = ['3R\t50\t200\t1\n'] + assert f"{res}" == f"{expected}" os.remove(tempFile[3]) - def test_writeBedGraph_worker_smoothing(self): - self.c.binLength = 20 - self.c.stepSize = 20 - self.c.smoothLength = 60 - tempFile = self.c.writeBedGraph_worker('3R', 100, 200, scaleCoverage, self.func_args) + def test_writeBedGraph_worker_smoothing(self, bc): + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc) + c.binLength = 20 + c.stepSize = 20 + c.smoothLength = 60 + tempFile = c.writeBedGraph_worker(chrom, 100, 200, scaleCoverage, func_args) _foo = open(tempFile[3], 'r') res = _foo.readlines() _foo.close() - assert_equal(res, ['3R\t100\t120\t1\n', 
'3R\t120\t180\t1.33333\n', '3R\t180\t200\t1\n']) + expected = ['3R\t100\t120\t1\n', '3R\t120\t180\t1.33333\n', '3R\t180\t200\t1\n'] + assert f"{res}" == f"{expected}" os.remove(tempFile[3]) - def test_writeBedGraph_cigar(self): + def test_writeBedGraph_cigar(self, bc): """ The bamFile1 contains a read at position 10 with the following CIGAR: 10S20M10N10M10S that maps to a chromosome named chr_cigar. """ - + c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc) # turn of read extension - self.c.extendPairedEnds = False - self.c.binLength = 10 - self.c.stepSize = 10 - tempFile = self.c.writeBedGraph_worker('chr_cigar', 0, 100, scaleCoverage, self.func_args) + c.extendPairedEnds = False + c.binLength = 10 + c.stepSize = 10 + tempFile = c.writeBedGraph_worker('chr_cigar', 0, 100, scaleCoverage, func_args) _foo = open(tempFile[3], 'r') res = _foo.readlines() _foo.close() - # the sigle read is split into bin 10-30, and then 40-50 - assert_equal(res, ['chr_cigar\t0\t10\t0\n', - 'chr_cigar\t10\t30\t1\n', - 'chr_cigar\t30\t40\t0\n', - 'chr_cigar\t40\t50\t1\n', - 'chr_cigar\t50\t100\t0\n']) + # the single read is split into bin 10-30, and then 40-50 + expected = [ + 'chr_cigar\t0\t10\t0\n', + 'chr_cigar\t10\t30\t1\n', + 'chr_cigar\t30\t40\t0\n', + 'chr_cigar\t40\t50\t1\n', + 'chr_cigar\t50\t100\t0\n' + ] + assert f"{res}" == f"{expected}" os.remove(tempFile[3]) - - -class TestWriteBedGraphCRAM(TestWriteBedGraph): - def setUp(self): - """ - As above, but for CRAM files - """ - - self.root = ROOT - self.bamFile1 = self.root + "testA.cram" - self.bamFile2 = self.root + "testB.cram" - self.bamFile_PE = self.root + "test_paired2.cram" - self.chrom = '3R' - - self.step_size = 50 - self.bin_length = 50 - self.func_args = {'scaleFactor': 1.0} - - self.c = wr.WriteBedGraph([self.bamFile1], - binLength=self.bin_length, - stepSize=self.step_size) diff --git a/docs/conf.py b/docs/conf.py index f88dd260f..8e647cfc2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -14,6 +14,7 @@ import sys import os +import tomllib # to allow readthedocs to compile without installing some dependencies import mock @@ -81,17 +82,12 @@ def get_version(): - import re + with open('../pyproject.toml', 'rb') as f: + d = tomllib.load(f) try: - f = open("../deeptools/_version.py") - except EnvironmentError: + return d['project']['version'] + except: return None - for line in f.readlines(): - mo = re.match("__version__ = '([^']+)'", line) - if mo: - ver = mo.group(1) - return ver - return None version = get_version() diff --git a/docs/content/installation.rst b/docs/content/installation.rst index c45ecacfe..d0f77ac71 100644 --- a/docs/content/installation.rst +++ b/docs/content/installation.rst @@ -10,26 +10,26 @@ Remember -- deepTools are available for **command line usage** as well as for Requirements ------------- -* Python 2.7 or Python 3.x -* numpy >= 1.8.0 +* Python >= 3.7 +* numpy >= 1.9.0 * scipy >= 0.17.0 -* py2bit >= 0.1.0 +* matplotlib >= 3.3.0 +* pysam >= 0.14.0 +* numpydoc >= 0.5 * pyBigWig >= 0.2.1 -* pysam >= 0.8 -* matplotlib >= 1.4.0 +* py2bit >= 0.2.0 +* plotly >= 4.9 +* deeptoolsintervals >= 0.1.8 +* importlib-metadata (when running python 3.7) -The fastest way to obtain **Python 2.7 or Python 3.x together with numpy and scipy** is -via the `Anaconda Scientific Python -Distribution `_. -Just download the version that's suitable for your operating system and -follow the directions for its installation. 
All of the requirements for deepTools can be installed in Anaconda with: +DeepTools (including the requirements) can be installed with conda: .. code:: bash $ conda install -c bioconda deeptools -Command line installation using ``pip`` ------------------------------------------ +Command line installation using ``pip`` from pypi +-------------------------------------------------- Install deepTools using the following command: :: @@ -45,10 +45,10 @@ If you need to specify a specific path for the installation of the tools, make u $ pip install --install-option="--prefix=/MyPath/Tools/deepTools2.0" git+https://github.com/deeptools/deepTools.git -Command line installation without ``pip`` ------------------------------------------- +Command line installation using ``pip`` from source +--------------------------------------------------- -You are highly recommended to use `pip` rather than these more complicated steps. +It is highly recommended to use the 'pypi installation' rather than these more complicated steps. 1. Install the requirements listed above in the "requirements" section. This is done automatically by `pip`. @@ -63,11 +63,11 @@ or if you want a particular release, choose one from https://github.com/deeptool $ wget https://github.com/deeptools/deepTools/archive/1.5.12.tar.gz $ tar -xzvf -3. install the source code (if you don't have root permission, you can set -a specific folder using the ``--prefix`` option) +3. Install the source code :: - $ python setup.py install --prefix /User/Tools/deepTools2.0 + $ python -m build + $ pip install dist/*whl Galaxy installation -------------------- diff --git a/galaxy/wrapper/deepTools_macros.xml b/galaxy/wrapper/deepTools_macros.xml index 0f16a03e0..2f8b8200d 100755 --- a/galaxy/wrapper/deepTools_macros.xml +++ b/galaxy/wrapper/deepTools_macros.xml @@ -1,8 +1,8 @@ --numberOfProcessors "\${GALAXY_SLOTS:-4}" - 3.5.2 - 20.01 + 3.5.3 + 22.05 deeptools @@ -441,12 +441,15 @@ is vital to you, select Yes below."> - - - - - - + + + + + + + + [A-Za-z0-9 =-_/+]+ + diff --git a/galaxy/wrapper/test-data/heatmapper_result1.png b/galaxy/wrapper/test-data/heatmapper_result1.png index 2ba154b08..fa3686351 100644 Binary files a/galaxy/wrapper/test-data/heatmapper_result1.png and b/galaxy/wrapper/test-data/heatmapper_result1.png differ diff --git a/galaxy/wrapper/test-data/heatmapper_result2.png b/galaxy/wrapper/test-data/heatmapper_result2.png index 6b49b4a36..fb3bd3675 100644 Binary files a/galaxy/wrapper/test-data/heatmapper_result2.png and b/galaxy/wrapper/test-data/heatmapper_result2.png differ diff --git a/galaxy/wrapper/test-data/plotCorrelation_result2.png b/galaxy/wrapper/test-data/plotCorrelation_result2.png index 58ade4f24..c144cf38a 100644 Binary files a/galaxy/wrapper/test-data/plotCorrelation_result2.png and b/galaxy/wrapper/test-data/plotCorrelation_result2.png differ diff --git a/galaxy/wrapper/test-data/plotFingerprint_quality_metrics.tabular b/galaxy/wrapper/test-data/plotFingerprint_quality_metrics.tabular index f2e160882..69cba649d 100644 --- a/galaxy/wrapper/test-data/plotFingerprint_quality_metrics.tabular +++ b/galaxy/wrapper/test-data/plotFingerprint_quality_metrics.tabular @@ -1,3 +1,3 @@ Sample AUC Synthetic AUC X-intercept Synthetic X-intercept Elbow Point Synthetic Elbow Point JS Distance Synthetic JS Distance % genome enriched diff.
enrichment CHANCE divergence -bowtie2 test1.bam 0.00493632029863651 0.481650684757865 0.984443061605476 1.1531044350267195e-24 0.9849408836341008 0.5232688298112538 nan 0.2690044980681214 nan nan nan -bowtie2 test1.bam 0.00493632029863651 0.481650684757865 0.984443061605476 1.1531044350267195e-24 0.9849408836341008 0.5232688298112538 0.0 0.2690044980681214 0 0 0 +bowtie2 test1.bam 0.00493632029863651 0.481650684757865 0.984443061605476 1.1531044350267195e-24 0.9849408836341008 0.5232688298112538 nan 0.269004498068121 nan nan nan +bowtie2 test1.bam 0.00493632029863651 0.481650684757865 0.984443061605476 1.1531044350267195e-24 0.9849408836341008 0.5232688298112538 0.0 0.269004498068121 0 0 0 diff --git a/galaxy/wrapper/test-data/profiler_result1.png b/galaxy/wrapper/test-data/profiler_result1.png index dcb1d2602..a0f14d8b3 100644 Binary files a/galaxy/wrapper/test-data/profiler_result1.png and b/galaxy/wrapper/test-data/profiler_result1.png differ diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..68e9cabb4 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,73 @@ +[build-system] +requires = [ + "setuptools" +] + +[project] +name = "deepTools" +version = "3.5.3" +authors = [ + {name="Fidel Ramirez"}, + {name="Devon P Ryan"}, + {name="Björn Grüning"}, + {name="Friederike Dündar"}, + {name="Sarah Diehl"}, + {name="Vivek Bhardwaj"}, + {name="Fabian Kilpert"}, + {name="Andreas S Richter"}, + {name="Steffen Heyne"}, + {name="Thomas Manke"}, + {email="bioinfo-core@ie-freiburg.mpg.de"} +] +requires-python = ">=3.7" +dependencies = [ + "numpy >= 1.9.0", + "scipy >= 0.17.0", + "matplotlib >= 3.3.0", + "pysam >= 0.14.0", + "numpydoc >= 0.5", + "pyBigWig >= 0.2.1", + "py2bit >= 0.2.0", + "plotly >= 4.9", + "deeptoolsintervals >= 0.1.8", + "importlib-metadata" # python 3.7 support +] +description = "Useful tools for exploring deep sequencing data." 
+license = {file = "LICENSE.txt"} +classifiers = [ + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering :: Bio-Informatics" +] +readme = "README.rst" + +[project.urls] +homepage = "https://pypi.python.org/pypi/deepTools/" +documentation = "https://deeptools.readthedocs.io/en/latest/" +repository = "https://github.com/deeptools/deepTools" + +[tool.setuptools] +packages = ["deeptools"] + +[project.scripts] +alignmentSieve = "deeptools.alignmentSieve:main" +bamCompare = "deeptools.bamCompare:main" +bamCoverage = "deeptools.bamCoverage:main" +bamPEFragmentSize = "deeptools.bamPEFragmentSize:main" +bigwigAverage = "deeptools.bigwigAverage:main" +bigwigCompare = "deeptools.bigwigCompare:main" +computeGCBias = "deeptools.computeGCBias:main" +computeMatrix = "deeptools.computeMatrix:main" +computeMatrixOperations = "deeptools.computeMatrixOperations:main" +correctGCBias = "deeptools.correctGCBias:main" +deeptools = "deeptools.deeptools_list_tools:main" +estimateReadFiltering = "deeptools.estimateReadFiltering:main" +estimateScaleFactor = "deeptools.estimateScaleFactor:main" +multiBamSummary = "deeptools.multiBamSummary:main" +multiBigwigSummary = "deeptools.multiBigwigSummary:main" +plotCorrelation = "deeptools.plotCorrelation:main" +plotCoverage = "deeptools.plotCoverage:main" +plotEnrichment = "deeptools.plotEnrichment:main" +plotFingerprint = "deeptools.plotFingerprint:main" +plotHeatmap = "deeptools.plotHeatmap:main" +plotPCA = "deeptools.plotPCA:main" +plotProfile = "deeptools.plotProfile:main" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index b7af566fc..000000000 --- a/requirements.txt +++ /dev/null @@ -1,99 +0,0 @@ -# -# This file is autogenerated by pip-compile with python 3.9 -# To update, run: -# -# pip-compile -# -alabaster==0.7.12 - # via sphinx -babel==2.10.1 - # via sphinx -certifi==2022.12.7 - # via requests -charset-normalizer==2.0.12 - # via requests -cycler==0.11.0 - # via matplotlib -deeptoolsintervals==0.1.9 - # via -r requirements.in -docutils==0.17.1 - # via sphinx -fonttools==4.33.3 - # via matplotlib -idna==3.3 - # via requests -imagesize==1.3.0 - # via sphinx -importlib-metadata==4.11.3 - # via sphinx -jinja2==3.1.2 - # via - # numpydoc - # sphinx -kiwisolver==1.4.2 - # via matplotlib -markupsafe==2.1.1 - # via jinja2 -matplotlib>=3.5<3.6 - # We're manually setting matplitlib <3.6 to avoide issue #1172 -numpy==1.22.3 - # via - # -r requirements.in - # matplotlib - # scipy -numpydoc==1.3.1 - # via -r requirements.in -packaging==21.3 - # via - # matplotlib - # sphinx -pillow==9.3.0 - # via matplotlib -plotly==5.7.0 - # via -r requirements.in -py2bit==0.3.0 - # via -r requirements.in -pybigwig==0.3.18 - # via -r requirements.in -pygments==2.12.0 - # via sphinx -pyparsing==3.0.8 - # via - # matplotlib - # packaging -pysam==0.19.0 - # via -r requirements.in -python-dateutil==2.8.2 - # via matplotlib -pytz==2022.1 - # via babel -requests==2.27.1 - # via sphinx -scipy==1.8.0 - # via -r requirements.in -six==1.16.0 - # via - # plotly - # python-dateutil -snowballstemmer==2.2.0 - # via sphinx -sphinx==4.5.0 - # via numpydoc -sphinxcontrib-applehelp==1.0.2 - # via sphinx -sphinxcontrib-devhelp==1.0.2 - # via sphinx -sphinxcontrib-htmlhelp==2.0.0 - # via sphinx -sphinxcontrib-jsmath==1.0.1 - # via sphinx -sphinxcontrib-qthelp==1.0.3 - # via sphinx -sphinxcontrib-serializinghtml==1.1.5 - # via sphinx -tenacity==8.0.1 - # via plotly -urllib3==1.26.9 - # via requests -zipp==3.8.0 - # via importlib-metadata \ No newline at 
end of file diff --git a/setup.py b/setup.py deleted file mode 100755 index 76514364d..000000000 --- a/setup.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from setuptools import setup, find_packages -from setuptools.command.sdist import sdist as _sdist -from setuptools.command.install import install as _install - -VERSION_PY = """ -# This file is originally generated from Git information by running 'setup.py -# version'. Distribution tarballs contain a pre-generated copy of this file. - -__version__ = '%s' -""" - - -def get_version(): - try: - f = open("deeptools/_version.py") - except EnvironmentError: - return None - for line in f.readlines(): - mo = re.match("__version__ = '([^']+)'", line) - if mo: - ver = mo.group(1) - return ver - return None - - -class sdist(_sdist): - - def run(self): - self.distribution.metadata.version = get_version() - return _sdist.run(self) - - -class install(_install): - - def run(self): - self.distribution.metadata.version = get_version() - _install.run(self) - return - - -def openREADME(): - """ - This is only needed because README.rst is UTF-8 encoded and that won't work - under python3 iff sys.getfilesystemencoding() returns 'ascii' - - Since open() doesn't accept an encoding in python2... - """ - try: - f = open("README.rst", encoding="utf-8") - except: - f = open("README.rst") - - foo = f.read() - f.close() - return foo - - -setup( - name='deepTools', - version=get_version(), - author='Fidel Ramirez, Devon P Ryan, Björn Grüning, Friederike Dündar, Sarah Diehl,' - ' Vivek Bhardwaj, Fabian Kilpert, Andreas S Richter, Steffen Heyne, Thomas Manke', - author_email='dpryan79@gmail.com', - packages=find_packages(), - scripts=['bin/bamCompare', 'bin/bamCoverage', 'bin/multiBamSummary', - 'bin/plotHeatmap', 'bin/plotFingerprint', 'bin/estimateScaleFactor', - 'bin/bamPEFragmentSize', 'bin/computeMatrix', 'bin/plotProfile', - 'bin/computeGCBias', 'bin/correctGCBias', 'bin/multiBigwigSummary', - 'bin/bigwigCompare', 'bin/plotCoverage', 'bin/plotPCA', 'bin/plotCorrelation', - 'bin/plotEnrichment', 'bin/deeptools', 'bin/computeMatrixOperations', - 'bin/estimateReadFiltering', 'bin/alignmentSieve', 'bin/bigwigAverage'], - include_package_data=True, - url='http://pypi.python.org/pypi/deepTools/', - license='LICENSE.txt', - description='Useful tools for exploring deep sequencing data ', - long_description=openREADME(), - classifiers=[ - 'Intended Audience :: Science/Research', - 'Topic :: Scientific/Engineering :: Bio-Informatics'], - install_requires=[ - "numpy >= 1.9.0", - "scipy >= 0.17.0", - "matplotlib >= 3.3.0", - "pysam >= 0.14.0", - "numpydoc >= 0.5", - "pyBigWig >= 0.2.1", - "py2bit >= 0.2.0", - "plotly >= 4.9", - "deeptoolsintervals >= 0.1.8" - ], - zip_safe=True, - cmdclass={'sdist': sdist, 'install': install} -)