# build-docker-gpu.yml — add support for multi arch (#116)
| name: build-docker-images-gpus | |
| on: | |
| push: | |
| branches: [ "main" ] | |
| paths-ignore: [ "*.md" ] | |
| pull_request: | |
| branches: [ "main" ] | |
| paths-ignore: [ "*.md" ] | |
| workflow_dispatch: # Allows you to run this workflow manually from the Actions tab | |
| concurrency: | |
| group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} | |
| cancel-in-progress: true | |
| env: | |
| BUILDKIT_PROGRESS: "plain" # Full logs for CI build. | |
| REGISTRY_SRC: ${{ vars.REGISTRY_SRC || 'quay.io' }} # For BASE_NAMESPACE of images: where to pull base images from, docker.io or other source registry URL. | |
| REGISTRY_DST: ${{ vars.REGISTRY_DST || 'quay.io' }} # For tags of built images: where to push images to, docker.io or other destination registry URL. | |
| # DOCKER_REGISTRY_USERNAME and DOCKER_REGISTRY_PASSWORD is required for docker image push, they should be set in CI secrets. | |
| DOCKER_REGISTRY_USERNAME: ${{ vars.DOCKER_REGISTRY_USERNAME }} | |
| DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD }} | |
| # used to sync image to mirror registry | |
| DOCKER_MIRROR_REGISTRY_USERNAME: ${{ vars.DOCKER_MIRROR_REGISTRY_USERNAME }} | |
| DOCKER_MIRROR_REGISTRY_PASSWORD: ${{ secrets.DOCKER_MIRROR_REGISTRY_PASSWORD }} | |
| CI_PROJECT_NAME: ${{ vars.CI_PROJECT_NAME || 'LabNow/lab-foundation' }} | |
| jobs: | |
| # cuda docker image tags: https://hub.docker.com/r/nvidia/cuda/tags | |
| # latest cuda supported by torch: https://pytorch.org/get-started/locally/ | |
| # latest cuda supported by tensorflow: https://tensorflow.google.cn/install/source?hl=en#gpu | |
| # latest cuda supported by paddlepadle: https://www.paddlepaddle.org.cn/ | |
| # latest cuda supported by vllm: https://docs.vllm.ai/en/latest/getting_started/installation/gpu.html?device=cuda | |
| # reserved for vllm: https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile | |
| job-cuda_128: | |
| name: 'cuda_12.8,cuda,nvidia-cuda' | |
| strategy: | |
| matrix: | |
| include: [{arch: amd64, runner: ubuntu-latest}] # , {arch: arm64, runner: ubuntu-24.04-arm} | |
| runs-on: ${{ matrix.runner }} | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh | |
| build_image_no_tag nvidia-cuda 12.8.1-cudnn-devel-ubuntu24.04 docker_atom/Dockerfile --build-arg "BASE_IMG=nvidia/cuda:12.8.1-cudnn-devel-ubuntu24.04" && clear_images nvidia/cuda | |
| export IMG_PREFIX_SRC="${IMG_PREFIX_DST}" | |
| PUSH=false TAG_VER=false build_image tmp latest docker_base/Dockerfile --build-arg "BASE_IMG=nvidia-cuda:12.8.1-cudnn-devel-ubuntu24.04" | |
| build_image cuda_12.8 latest docker_cuda/nvidia-cuda.Dockerfile --build-arg "BASE_IMG=tmp" --build-context ${IMG_PREFIX_SRC}/tmp:latest=docker-image://${IMG_PREFIX_SRC}/tmp:latest | |
| job-cuda_126: | |
| name: 'cuda_12.6' | |
| strategy: | |
| matrix: | |
| include: [{arch: amd64, runner: ubuntu-latest}] # , {arch: arm64, runner: ubuntu-24.04-arm} | |
| runs-on: ${{ matrix.runner }} | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh | |
| build_image_no_tag nvidia-cuda 12.6.3-cudnn-devel-ubuntu24.04 docker_atom/Dockerfile --build-arg "BASE_IMG=nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04" && clear_images nvidia/cuda | |
| export IMG_PREFIX_SRC="${IMG_PREFIX_DST}" | |
| PUSH=false TAG_VER=false build_image tmp latest docker_base/Dockerfile --build-arg "BASE_IMG=nvidia-cuda:12.6.3-cudnn-devel-ubuntu24.04" | |
| build_image cuda_12.6 latest docker_cuda/nvidia-cuda.Dockerfile --build-arg "BASE_IMG=tmp" --build-context ${IMG_PREFIX_SRC}/tmp:latest=docker-image://${IMG_PREFIX_SRC}/tmp:latest | |
| build_image cuda latest docker_cuda/nvidia-cuda.Dockerfile --build-arg "BASE_IMG=tmp" --build-context ${IMG_PREFIX_SRC}/tmp:latest=docker-image://${IMG_PREFIX_SRC}/tmp:latest | |
| job-tf2: | |
| name: 'tf2,tf2-cuda126' | |
| needs: job-cuda_126 | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh | |
| build_image tf2-cuda126 latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.6" --build-arg "ARG_PROFILE_PYTHON=tf2" | |
| build_image tf2 latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.6" --build-arg "ARG_PROFILE_PYTHON=tf2" | |
| job-torch_cuda128: | |
| name: 'torch,torch-cuda128' | |
| needs: job-cuda_128 | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh | |
| build_image torch-cuda128 latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.8" --build-arg "ARG_PROFILE_PYTHON=torch" | |
| build_image torch latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.8" --build-arg "ARG_PROFILE_PYTHON=torch" | |
| job-paddle_cuda126: | |
| name: 'paddle-cuda126,paddle-3.0' | |
| needs: job-cuda_126 | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh | |
| build_image paddle-cuda126 latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.6" --build-arg "ARG_PROFILE_PYTHON=paddle,mkl" | |
| build_image paddle-3.0 latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.6" --build-arg "ARG_PROFILE_PYTHON=paddle,mkl" | |
| job-py-nlp: | |
| name: 'py-nlp,py-nlp-cuda128' | |
| needs: job-cuda_128 | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh && export IMG_PREFIX_SRC="${IMG_PREFIX_DST}" | |
| build_image py-nlp-cuda128 latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.8" --build-arg "ARG_PROFILE_PYTHON=datascience,mkl,torch,nlp" | |
| build_image py-nlp latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.8" --build-arg "ARG_PROFILE_PYTHON=datascience,mkl,torch,nlp" | |
| job-py-cv: | |
| name: 'py-cv' | |
| needs: job-cuda_128 | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh && export IMG_PREFIX_SRC="${IMG_PREFIX_DST}" | |
| build_image py-cv latest docker_core/Dockerfile --build-arg "BASE_IMG=cuda_12.8" --build-arg "ARG_PROFILE_PYTHON=datascience,mkl,torch,cv" | |
| job-core-cuda: | |
| name: 'core-cuda,full-cuda-12.8' | |
| needs: job-cuda_128 | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh && free_diskspace && export IMG_PREFIX_SRC="${IMG_PREFIX_DST}" | |
| build_image full-cuda-12.8 latest docker_core/Dockerfile \ | |
| --build-arg "BASE_IMG=cuda_12.8" \ | |
| --build-arg "ARG_PROFILE_PYTHON=base,datascience,mkl,database,nlp,cv,chem,tf2,torch" \ | |
| --build-arg "ARG_PROFILE_R=base,datascience" \ | |
| --build-arg "ARG_PROFILE_NODEJS=base" \ | |
| --build-arg "ARG_PROFILE_JAVA=base,maven" \ | |
| --build-arg "ARG_PROFILE_LATEX=base,cjk" | |
| build_image core-cuda latest docker_core/Dockerfile \ | |
| --build-arg "BASE_IMG=cuda_12.8" \ | |
| --build-arg "ARG_PROFILE_PYTHON=base,datascience,mkl,database,nlp,cv,chem,tf2,torch" \ | |
| --build-arg "ARG_PROFILE_R=base,datascience" \ | |
| --build-arg "ARG_PROFILE_NODEJS=base" \ | |
| --build-arg "ARG_PROFILE_JAVA=base,maven" \ | |
| --build-arg "ARG_PROFILE_LATEX=base,cjk" | |
| job-nvidia-ctk: | |
| name: 'nvidia-ctk' | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - run: | | |
| source ./tool.sh && export IMG_PREFIX_SRC="docker.io/library" | |
| build_image nvidia-ctk latest docker_cuda/nvidia-ctk.Dockerfile | |
| ## Sync all images in this build (listed by "names") to mirror registry. | |
| sync_images: | |
| needs: ["job-core-cuda", "job-py-cv", "job-py-nlp", "job-torch_cuda128", "job-nvidia-ctk"] | |
| runs-on: ubuntu-latest | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - env: | |
| AUTH_FILE_CONTENT: ${{ secrets.AUTH_FILE_CONTENT }} | |
| DOCKER_MIRROR_REGISTRY: ${{ vars.DOCKER_MIRROR_REGISTRY }} | |
| run: | | |
| source ./tool.sh | |
| printf '%s' "$AUTH_FILE_CONTENT" > .github/workflows/auth.json && ls -alh ./.github/workflows | |
| printenv | grep -v 'PATH' > /tmp/docker.env && echo "REGISTRY_URL=${REGISTRY_DST}" >> /tmp/docker.env | |
| docker run --rm --env-file /tmp/docker.env -v $(pwd):/tmp -w /tmp ${IMG_PREFIX_DST:-labnow}/docker-kit \ | |
| python /opt/utils/image-syncer/run_jobs.py --auth-file=/tmp/.github/workflows/auth.json \ | |
| --workflow-file=".github/workflows/build-docker-gpu.yml" |