Skip to content

Commit

Permalink
CI: Update cudf python to 0.16 nightly (NVIDIA#790)
Browse files Browse the repository at this point in the history
Also make the pool size and the max pool size multiples of 256 bytes, as
required by cudf 0.16.

Signed-off-by: Firestarman <firestarmanllc@gmail.com>
  • Loading branch information
firestarman authored Sep 18, 2020
1 parent 4aaf0b5 commit 5bfd67b
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 15 deletions.
25 changes: 11 additions & 14 deletions jenkins/Dockerfile.integration.centos7
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@ ARG CUDA_VER=10.1

FROM nvidia/cuda:${CUDA_VER}-runtime-centos7

ARG CUDA_VER=10.1
ARG CUDF_VER
ARG URM_URL

#Install java-8, maven, docker image
RUN yum update -y && \
yum install -y centos-release-scl && \
Expand All @@ -31,27 +35,20 @@ RUN yum update -y && \
# The default mvn version is 3.0.5 on the centos7 docker container.
# The plugin: net.alchim31.maven requires a higher mvn version.
ENV MAVEN_HOME "/usr/local/apache-maven-3.6.3"
ARG URM_URL
RUN wget ${URM_URL}/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.tar.gz -P /usr/local && \
tar xzvf $MAVEN_HOME-bin.tar.gz -C /usr/local && \
rm -f $MAVEN_HOME-bin.tar.gz

ENV PATH "$MAVEN_HOME/bin:$PATH"

RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/conda

ENV PATH="/opt/conda/bin:${PATH}"
/bin/bash ~/miniconda.sh -b -p /opt/conda && \
rm -f ~/miniconda.sh

RUN . /opt/conda/bin/activate && \
conda init && \
conda --version
ENV PATH="/opt/conda/bin:$MAVEN_HOME/bin:${PATH}"

ARG CUDA_TOOLKIT_VER=10.1
RUN conda install -y -c rapidsai -c nvidia -c conda-forge -c defaults cudf=0.15 python=3.7 cudatoolkit=${CUDA_TOOLKIT_VER} && \
conda install -y spacy && \
python -m spacy download en_core_web_sm && \
conda install -y -c anaconda pytest requests pandas pyarrow && \
# 'pyarrow' and 'pandas' will be installed as dependencies of cudf below
RUN conda install -y -c rapidsai -c rapidsai-nightly -c nvidia -c conda-forge -c defaults cudf=${CUDF_VER} python=3.7 cudatoolkit=${CUDA_VER} && \
conda install -y spacy && python -m spacy download en_core_web_sm && \
conda install -y -c anaconda pytest requests && \
conda install -y -c conda-forge sre_yield && \
conda clean -ay

Expand Down
4 changes: 3 additions & 1 deletion jenkins/Jenkinsfile.integration
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,8 @@ pipeline {
agent { label 'docker-gpu' }
steps {
script {
def CUDF_VER=sh(returnStdout: true,
script: '. jenkins/version-def.sh>&2 && echo -n $CUDF_VER') - "-SNAPSHOT"
def CUDA_NAME=sh(returnStdout: true,
script: '. jenkins/version-def.sh>&2 && echo -n $CUDA_CLASSIFIER | sed "s/-/./g"')
def IMAGE_NAME="$ARTIFACTORY_NAME/sw-spark-docker/plugin:it-centos7-$CUDA_NAME"
Expand All @@ -67,7 +69,7 @@ pipeline {
// Speed up Docker building via '--cache-from $IMAGE_NAME'
def buildImage=docker.build(IMAGE_NAME,
"-f jenkins/Dockerfile.integration.centos7 --build-arg CUDA_VER=$CUDA_VER \
--build-arg URM_URL=$URM_URL --cache-from $IMAGE_NAME -t $IMAGE_NAME .")
--build-arg URM_URL=$URM_URL --build-arg CUDF_VER=$CUDF_VER --cache-from $IMAGE_NAME -t $IMAGE_NAME .")
def buildImageID=sh(returnStdout: true, script: "docker inspect -f {{'.Id'}} $IMAGE_NAME")
if (! buildImageID.equals(urmImageID)) {
echo "Dockerfile updated, upload docker image to URM"
Expand Down
2 changes: 2 additions & 0 deletions python/rapids/worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,8 @@ def initialize_gpu_mem():
"`RAPIDS_POOLED_MEM_SIZE`.")
if pool_max_size == 0:
pool_max_size = max_size
pool_max_size = pool_max_size >> 8 << 8
pool_size = pool_size >> 8 << 8
print("DEBUG: Pooled memory, pool size: {} MiB, max size: {} MiB".format(
pool_size / 1024.0 / 1024,
('unlimited' if pool_max_size == max_size else pool_max_size / 1024.0 / 1024)))
Expand Down

0 comments on commit 5bfd67b

Please sign in to comment.