🤖 Update infrastructure file(s) #2260

Workflow file for this run

name: test
on:
  pull_request:
  push:
    branches:
      - main
concurrency:
  # Concurrency group that uses the workflow name and PR number if available
  # or commit SHA as a fallback. If a new build is triggered under that
  # concurrency group while a previous build is running it will be canceled.
  # Repeated pushes to a PR will cancel all previous builds, while multiple
  # merges to main will not cancel.
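  # For example: a push to a pull request yields a group like
  # "test-<pull request number>", so a newer push cancels the older
  # in-progress run, while each push to main falls back to a unique
  # "test-<commit SHA>" group and is therefore never canceled.
  # (Illustrative values; the expression below is authoritative.)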
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
  cancel-in-progress: true
env:
  # See also the same variable in integration-test.yml
  MICROMAMBA_VERSION: 'latest'
jobs:
  test:
    name: pytest py${{ matrix.python-version }} on ${{ matrix.os }} (${{ matrix.pytest-split-group-index }}/${{ matrix.pytest-split-group-size }})
    runs-on: ${{ matrix.os }}-latest
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu, macos, windows ]
        python-version: [ "3.9", "3.13" ]
        # pytest-split handles dividing the tests into n groups indexed 1...n.
        # The tests are automatically split so that the expected duration of each
        # group is roughly the same.
        # See the "exclude" section below for pruning to the group sizes,
        # and the "include" section for defining the group sizes.
        pytest-split-group-index: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        exclude:
          # Prune the indices so that we only run the splits up to the group size
          # defined below in "include". (This is ugly but effective.)
          - os: ubuntu
            pytest-split-group-index: 4
          - os: ubuntu
            pytest-split-group-index: 5
          - os: ubuntu
            pytest-split-group-index: 6
          - os: ubuntu
            pytest-split-group-index: 7
          - os: ubuntu
            pytest-split-group-index: 8
          - os: ubuntu
            pytest-split-group-index: 9
          - os: ubuntu
            pytest-split-group-index: 10
          - os: macos
            pytest-split-group-index: 4
          - os: macos
            pytest-split-group-index: 5
          - os: macos
            pytest-split-group-index: 6
          - os: macos
            pytest-split-group-index: 7
          - os: macos
            pytest-split-group-index: 8
          - os: macos
            pytest-split-group-index: 9
          - os: macos
            pytest-split-group-index: 10
        include:
          - os: ubuntu
            pytest-split-group-size: 3
          - os: macos
            pytest-split-group-size: 3
          - os: windows
            pytest-split-group-size: 10
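        # Rough job count per run: (3 ubuntu + 3 macos + 10 windows) groups
        # times 2 Python versions = 32 test jobs.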
    defaults:
      run:
        shell: bash -eo pipefail -l {0}
    env:
      PYTHONUNBUFFERED: "1"
      FORCE_COLOR: "1"
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          fetch-depth: 0
      - name: Set Conda platform
        run: |
          if [ "$RUNNER_OS" == "Linux" ]; then
            echo "CONDA_PLATFORM=linux-64" >> $GITHUB_ENV
          elif [ "$RUNNER_OS" == "macOS" ]; then
            echo "CONDA_PLATFORM=osx-arm64" >> $GITHUB_ENV
          elif [ "$RUNNER_OS" == "Windows" ]; then
            echo "CONDA_PLATFORM=win-64" >> $GITHUB_ENV
          fi
      - name: Verify Conda platform
        run: echo "Conda platform is $CONDA_PLATFORM"
      - name: Rename conda-lock-python-${{ matrix.python-version }}.yaml to conda-lock.yml
        # Otherwise, micromamba parses an empty environment.
        # TODO: Open an issue
        run: |
          LOCKFILE_DIR="${RUNNER_TEMP}/conda-lock-dir"
          mkdir -p "${LOCKFILE_DIR}"
          cp environments/conda-lock-python-${{ matrix.python-version }}.yaml "${LOCKFILE_DIR}/conda-lock.yml"
          echo "LOCKFILE_DIR=${LOCKFILE_DIR}"
          ls -al "${LOCKFILE_DIR}"
          echo "LOCKFILE_DIR=${LOCKFILE_DIR}" >> $GITHUB_ENV
      - uses: mamba-org/setup-micromamba@v2
        with:
          micromamba-version: ${{ env.MICROMAMBA_VERSION }}
          # # Use micromamba-url instead of micromamba-version to test prereleases
          # micromamba-url: https://github.com/mamba-org/micromamba-releases/releases/download/${{ env.MICROMAMBA_VERSION }}/micromamba-${{ env.CONDA_PLATFORM }}
          environment-file: ${{ env.LOCKFILE_DIR }}/conda-lock.yml
          environment-name: conda-lock-dev
          init-shell: bash
          cache-environment: true
          create-args: >-
            --category=main --category=dev
      - name: install conda-lock
        run: |
          which pip
          pip install -e . --no-deps
      - name: run pip check
        if: ${{ matrix.python-version != '3.13' }}
        # Ignore wrong version of conda-libmamba-solver for now.
        # <https://github.com/conda-forge/mamba-feedstock/pull/310#issuecomment-2768731938>
        run: pip check
      - name: run-test
        run: |
          mkdir -p tests/durations
          set -x
          which pytest
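          # Note: the --splits/--group flags below come from pytest-split and
          # select which slice of the suite this job runs; --store-durations
          # with --durations-path records per-test timings that the
          # aggregate-durations job merges afterwards.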
          pytest \
            --cov=conda_lock --cov-branch --cov-report=xml --cov-report=term \
            --store-durations \
            --clean-durations \
            --durations-path "tests/durations/${{ matrix.os }}-py${{ matrix.python-version }}.json" \
            --splits="${{ matrix.pytest-split-group-size }}" \
            --group="${{ matrix.pytest-split-group-index }}"
      - uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
      - name: Print test durations
        run: |
          ls -al tests/durations
          cat tests/durations/${{ matrix.os }}-py${{ matrix.python-version }}.json
      - name: Store test durations
        uses: actions/upload-artifact@v4
        with:
          name: test-durations-${{ matrix.os }}-py${{ matrix.python-version }}-${{ matrix.pytest-split-group-index }}
          path: tests/durations/${{ matrix.os }}-py${{ matrix.python-version }}.json
  aggregate-durations:
    name: Aggregate test durations
    runs-on: ubuntu-latest
    needs: test
    steps:
      - name: Download test durations
        uses: actions/download-artifact@v4
      # All the artifacts are downloaded into various subdirectories.
      # For each filename that occurs, we need to find all the files in the
      # subdirectories with the same name, group those, and merge them.
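      # For example, download-artifact unpacks each artifact into a directory
      # named after it, e.g. test-durations-ubuntu-py3.9-1/ubuntu-py3.9.json,
      # test-durations-ubuntu-py3.9-2/ubuntu-py3.9.json, ...; all files named
      # ubuntu-py3.9.json are then merged into aggregated/ubuntu-py3.9.json.
      # (Example names follow the upload step above.)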
      - name: Construct the list of filenames to aggregate and write them to temp/filenames.txt
        id: construct-filenames
        run: |
          mkdir temp
          find . -type f -name '*.json' | xargs -n1 basename | sort | uniq > temp/filenames.txt
          cat temp/filenames.txt
      - name: Aggregate test durations and sort by key (test name)
        run: |
          mkdir aggregated
          while read -r filename; do
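            # jq -s slurps every matching durations file into one array of
            # objects; "add" merges them into a single {test name: duration}
            # object (later files win on duplicate keys), which is then
            # sorted by test name.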
            jq -s 'add | to_entries | sort_by(.key) | from_entries' $(find . -type f -name "$filename") > "aggregated/$filename"
          done < temp/filenames.txt
      - name: Upload aggregated test durations
        uses: actions/upload-artifact@v4
        with:
          name: aggregated-test-durations
          path: aggregated